// Copyright (c) 2011-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <node/blockstorage.h>

#include <arith_uint256.h>
#include <chain.h>
#include <consensus/params.h>
#include <consensus/validation.h>
#include <dbwrapper.h>
#include <flatfile.h>
#include <hash.h>
#include <kernel/blockmanager_opts.h>
#include <kernel/chainparams.h>
#include <kernel/messagestartchars.h>
#include <kernel/notifications_interface.h>
#include <logging.h>
#include <pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <reverse_iterator.h>
#include <serialize.h>
#include <signet.h>
#include <span.h>
#include <streams.h>
#include <sync.h>
#include <tinyformat.h>
#include <uint256.h>
#include <undo.h>
#include <util/batchpriority.h>
#include <util/check.h>
#include <util/fs.h>
#include <util/signalinterrupt.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <validation.h>

#include <map>
#include <unordered_map>

namespace kernel {
static constexpr uint8_t DB_BLOCK_FILES{'f'};
static constexpr uint8_t DB_BLOCK_INDEX{'b'};
static constexpr uint8_t DB_FLAG{'F'};
static constexpr uint8_t DB_REINDEX_FLAG{'R'};
static constexpr uint8_t DB_LAST_BLOCK{'l'};
// Keys used in previous version that might still be found in the DB:
// BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
// BlockTreeDB::DB_TXINDEX{'t'}
// BlockTreeDB::ReadFlag("txindex")
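
// Key/value layout of the block tree DB, as implied by the constants above
// (a summary for reference, not an exhaustive specification):
//   DB_BLOCK_FILES ('f') + file number -> CBlockFileInfo for that blk?????.dat file
//   DB_BLOCK_INDEX ('b') + block hash  -> CDiskBlockIndex record for the block
//   DB_LAST_BLOCK  ('l')               -> number of the most recent block file
//   DB_FLAG        ('F') + flag name   -> '1'/'0' boolean flag (e.g. "prunedblockfiles")
//   DB_REINDEX_FLAG ('R')              -> present while a reindex is in progress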

bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
{
    return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
}

bool BlockTreeDB::WriteReindexing(bool fReindexing)
{
    if (fReindexing) {
        return Write(DB_REINDEX_FLAG, uint8_t{'1'});
    } else {
        return Erase(DB_REINDEX_FLAG);
    }
}

void BlockTreeDB::ReadReindexing(bool& fReindexing)
{
    fReindexing = Exists(DB_REINDEX_FLAG);
}

bool BlockTreeDB::ReadLastBlockFile(int& nFile)
{
    return Read(DB_LAST_BLOCK, nFile);
}

bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
{
    CDBBatch batch(*this);
    for (const auto& [file, info] : fileInfo) {
        batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
    }
    batch.Write(DB_LAST_BLOCK, nLastFile);
    for (const CBlockIndex* bi : blockinfo) {
        batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
    }
    return WriteBatch(batch, true);
}

bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
{
    return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
}

bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
{
    uint8_t ch;
    if (!Read(std::make_pair(DB_FLAG, name), ch)) {
        return false;
    }
    fValue = ch == uint8_t{'1'};
    return true;
}

bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
{
    AssertLockHeld(::cs_main);
    std::unique_ptr<CDBIterator> pcursor(NewIterator());
    pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));

    // Load m_block_index
    while (pcursor->Valid()) {
        if (interrupt) return false;
        std::pair<uint8_t, uint256> key;
        if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
            CDiskBlockIndex diskindex;
            if (pcursor->GetValue(diskindex)) {
                // Construct block index object
                CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
                pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
                pindexNew->nHeight = diskindex.nHeight;
                pindexNew->nFile = diskindex.nFile;
                pindexNew->nDataPos = diskindex.nDataPos;
                pindexNew->nUndoPos = diskindex.nUndoPos;
                pindexNew->nVersion = diskindex.nVersion;
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
                pindexNew->nTime = diskindex.nTime;
                pindexNew->nBits = diskindex.nBits;
                pindexNew->nNonce = diskindex.nNonce;
                pindexNew->nStatus = diskindex.nStatus;
                pindexNew->nTx = diskindex.nTx;

                if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
                    LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
                    return false;
                }

                pcursor->Next();
            } else {
                LogError("%s: failed to read value\n", __func__);
                return false;
            }
        } else {
            break;
        }
    }

    return true;
}
} // namespace kernel

namespace node {

bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
{
    // First sort by most total work, ...
    if (pa->nChainWork > pb->nChainWork) return false;
    if (pa->nChainWork < pb->nChainWork) return true;

    // ... then by earliest time received, ...
    if (pa->nSequenceId < pb->nSequenceId) return false;
    if (pa->nSequenceId > pb->nSequenceId) return true;

    // Use pointer address as tie breaker (should only happen with blocks
    // loaded from disk, as those all have id 0).
    if (pa < pb) return false;
    if (pa > pb) return true;

    // Identical blocks.
    return false;
}
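
// Note on CBlockIndexWorkComparator above: used as a std::set comparator it
// orders entries from least to most work, so the most-work candidate is the
// set's last element (reachable via rbegin()). This is an observation about
// the ordering, not additional behaviour defined in this file.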

bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
{
    return pa->nHeight < pb->nHeight;
}

std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
{
    AssertLockHeld(cs_main);
    std::vector<CBlockIndex*> rv;
    rv.reserve(m_block_index.size());
    for (auto& [_, block_index] : m_block_index) {
        rv.push_back(&block_index);
    }
    return rv;
}

CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
{
    AssertLockHeld(cs_main);
    BlockMap::iterator it = m_block_index.find(hash);
    return it == m_block_index.end() ? nullptr : &it->second;
}

const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
{
    AssertLockHeld(cs_main);
    BlockMap::const_iterator it = m_block_index.find(hash);
    return it == m_block_index.end() ? nullptr : &it->second;
}

CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
{
    AssertLockHeld(cs_main);

    auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
    if (!inserted) {
        return &mi->second;
    }
    CBlockIndex* pindexNew = &(*mi).second;

    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;

    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
    if (miPrev != m_block_index.end()) {
        pindexNew->pprev = &(*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
        best_header = pindexNew;
    }

    m_dirty_blockindex.insert(pindexNew);

    return pindexNew;
}

void BlockManager::PruneOneBlockFile(const int fileNumber)
{
    AssertLockHeld(cs_main);
    LOCK(cs_LastBlockFile);

    for (auto& entry : m_block_index) {
        CBlockIndex* pindex = &entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            m_dirty_blockindex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
    m_dirty_fileinfo.insert(fileNumber);
}

void BlockManager::FindFilesToPruneManual(
    std::set<int>& setFilesToPrune,
    int nManualPruneHeight,
    const Chainstate& chain,
    ChainstateManager& chainman)
{
    assert(IsPruneMode() && nManualPruneHeight > 0);

    LOCK2(cs_main, cs_LastBlockFile);
    if (chain.m_chain.Height() < 0) {
        return;
    }

    const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);

    int count = 0;
    for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
        const auto& fileinfo = m_blockfile_info[fileNumber];
        if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
            continue;
        }

        PruneOneBlockFile(fileNumber);
        setFilesToPrune.insert(fileNumber);
        count++;
    }
    LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
              chain.GetRole(), last_block_can_prune, count);
}

void BlockManager::FindFilesToPrune(
    std::set<int>& setFilesToPrune,
    int last_prune,
    const Chainstate& chain,
    ChainstateManager& chainman)
{
    LOCK2(cs_main, cs_LastBlockFile);
    // Distribute our -prune budget over all chainstates.
    const auto target = std::max(
        MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
    const uint64_t target_sync_height = chainman.m_best_header->nHeight;

    if (chain.m_chain.Height() < 0 || target == 0) {
        return;
    }
    if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
        return;
    }

    const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);

    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= target) {
        // On a prune event, the chainstate DB is flushed.
        // To avoid excessive prune events negating the benefit of high dbcache
        // values, we should not prune too rapidly.
        // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
        const auto chain_tip_height = chain.m_chain.Height();
        if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
            // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
            static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
            const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
            nBuffer += average_block_size * remaining_blocks;
        }
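
        // Rough illustration (hypothetical numbers): with the tip 20000 blocks
        // behind the best known header, nBuffer grows by 20000 * 1000000 bytes
        // (~20 GB), so the loop below prunes until usage drops under
        // target - nBuffer, leaving enough headroom for the blocks still
        // expected during IBD before another prune event is needed.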

        for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
            const auto& fileinfo = m_blockfile_info[fileNumber];
            nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;

            if (fileinfo.nSize == 0) {
                continue;
            }

            if (nCurrentUsage + nBuffer < target) { // are we below our target?
                break;
            }

            // don't prune files that could have a block that's not within the allowable
            // prune range for the chain being pruned.
            if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
                continue;
            }

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
             chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
             (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
             min_block_to_prune, last_block_can_prune, count);
}

void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
    AssertLockHeld(::cs_main);
    m_prune_locks[name] = lock_info;
}

CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
{
    AssertLockHeld(cs_main);

    if (hash.IsNull()) {
        return nullptr;
    }

    const auto [mi, inserted]{m_block_index.try_emplace(hash)};
    CBlockIndex* pindex = &(*mi).second;
    if (inserted) {
        pindex->phashBlock = &((*mi).first);
    }
    return pindex;
}

bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
{
    if (!m_block_tree_db->LoadBlockIndexGuts(
            GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
        return false;
    }

    if (snapshot_blockhash) {
        const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
        if (!maybe_au_data) {
            m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
            return false;
        }
        const AssumeutxoData& au_data = *Assert(maybe_au_data);
        m_snapshot_height = au_data.height;
        CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};

        // Since nChainTx (responsible for estimated progress) isn't persisted
        // to disk, we must bootstrap the value for assumedvalid chainstates
        // from the hardcoded assumeutxo chainparams.
        base->nChainTx = au_data.nChainTx;
        LogPrintf("[snapshot] set nChainTx=%d for %s\n", au_data.nChainTx, snapshot_blockhash->ToString());
    } else {
        // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
        // is null. This is relevant during snapshot completion, when the blockman may be loaded
        // with a height that then needs to be cleared after the snapshot is fully validated.
        m_snapshot_height.reset();
    }

    Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());

    // Calculate nChainWork
    std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
    std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
              CBlockIndexHeightOnlyComparator());

    CBlockIndex* previous_index{nullptr};
    for (CBlockIndex* pindex : vSortedByHeight) {
        if (m_interrupt) return false;
        if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
            LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
            return false;
        }
        previous_index = pindex;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);

        // We can link the chain of blocks for which we've received transactions at some point, or
        // blocks that are assumed-valid on the basis of snapshot load (see
        // PopulateAndValidateSnapshot()).
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
                    pindex->GetBlockHash() == *snapshot_blockhash) {
                    // Should have been set above; don't disturb it with code below.
                    Assert(pindex->nChainTx > 0);
                } else if (pindex->pprev->nChainTx > 0) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    pindex->nChainTx = 0;
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            m_dirty_blockindex.insert(pindex);
        }
        if (pindex->pprev) {
            pindex->BuildSkip();
        }
    }

    return true;
}

bool BlockManager::WriteBlockIndexDB()
{
    AssertLockHeld(::cs_main);
    std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
    vFiles.reserve(m_dirty_fileinfo.size());
    for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
        vFiles.emplace_back(*it, &m_blockfile_info[*it]);
        m_dirty_fileinfo.erase(it++);
    }
    std::vector<const CBlockIndex*> vBlocks;
    vBlocks.reserve(m_dirty_blockindex.size());
    for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
        vBlocks.push_back(*it);
        m_dirty_blockindex.erase(it++);
    }
    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
    if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
        return false;
    }
    return true;
}

bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
{
    if (!LoadBlockIndex(snapshot_blockhash)) {
        return false;
    }
    int max_blockfile_num{0};

    // Load block file info
    m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
    m_blockfile_info.resize(max_blockfile_num + 1);
    LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
    for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
        m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
    for (int nFile = max_blockfile_num + 1; true; nFile++) {
        CBlockFileInfo info;
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
            m_blockfile_info.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const auto& [_, block_index] : m_block_index) {
        if (block_index.nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(block_index.nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
        FlatFilePos pos(*it, 0);
        if (OpenBlockFile(pos, true).IsNull()) {
            return false;
        }
    }

    {
        // Initialize the blockfile cursors.
        LOCK(cs_LastBlockFile);
        for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
            const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
            m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
        }
    }

    // Check whether we have ever pruned block & undo files
    m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
    if (m_have_pruned) {
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
    }

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    m_block_tree_db->ReadReindexing(fReindexing);
    if (fReindexing) m_reindexing = true;

    return true;
}

void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
{
    AssertLockHeld(::cs_main);
    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
    if (!m_have_pruned) {
        return;
    }

    std::set<int> block_files_to_prune;
    for (int file_number = 0; file_number < max_blockfile; file_number++) {
        if (m_blockfile_info[file_number].nSize == 0) {
            block_files_to_prune.insert(file_number);
        }
    }

    UnlinkPrunedFiles(block_files_to_prune);
}

const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
{
    const MapCheckpoints& checkpoints = data.mapCheckpoints;

    for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) {
        const uint256& hash = i.second;
        const CBlockIndex* pindex = LookupBlockIndex(hash);
        if (pindex) {
            return pindex;
        }
    }
    return nullptr;
}

bool BlockManager::IsBlockPruned(const CBlockIndex& block)
{
    AssertLockHeld(::cs_main);
    return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
}

const CBlockIndex* BlockManager::GetFirstStoredBlock(const CBlockIndex& upper_block, const CBlockIndex* lower_block)
{
    AssertLockHeld(::cs_main);
    const CBlockIndex* last_block = &upper_block;
    assert(last_block->nStatus & BLOCK_HAVE_DATA); // 'upper_block' must have data
    while (last_block->pprev && (last_block->pprev->nStatus & BLOCK_HAVE_DATA)) {
        if (lower_block) {
            // Return if we reached the lower_block
            if (last_block == lower_block) return lower_block;
            // If the range was surpassed, 'lower_block' is not part of the 'upper_block' chain,
            // and so far this is not allowed.
            assert(last_block->nHeight >= lower_block->nHeight);
        }
        last_block = last_block->pprev;
    }
    assert(last_block != nullptr);
    return last_block;
}

bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
{
    if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
    return GetFirstStoredBlock(upper_block, &lower_block) == &lower_block;
}
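
// A minimal usage sketch for the two functions above (hypothetical caller;
// assumes cs_main is held and that the header declares a nullptr default for
// 'lower_block'):
//   const CBlockIndex* first{GetFirstStoredBlock(*tip)};
//   const bool contiguous{CheckBlockDataAvailability(*tip, *first)};
// i.e. walk back from 'tip' while block data is present on disk, then confirm
// the whole [first, tip] range is available.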

// If we're using -prune with -reindex, then delete block files that will be ignored by the
// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
// is missing, do the same here to delete any later block files after a gap. Also delete all
// rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
// is in sync with what's actually on disk by the time we start downloading, so that pruning
// works correctly.
void BlockManager::CleanupBlockRevFiles() const
{
    std::map<std::string, fs::path> mapBlockFiles;

    // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
    // Remove the rev files immediately and insert the blk file paths into an
    // ordered map keyed by block file index.
    LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
    for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
        const std::string path = fs::PathToString(it->path().filename());
        if (fs::is_regular_file(*it) &&
            path.length() == 12 &&
            path.substr(8,4) == ".dat")
        {
            if (path.substr(0, 3) == "blk") {
                mapBlockFiles[path.substr(3, 5)] = it->path();
            } else if (path.substr(0, 3) == "rev") {
                remove(it->path());
            }
        }
    }

    // Remove all block files that aren't part of a contiguous set starting at
    // zero by walking the ordered map (keys are block file indices) while
    // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
    // start removing block files.
    int nContigCounter = 0;
    for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
        if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
            nContigCounter++;
            continue;
        }
        remove(item.second);
    }
}

CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
{
    LOCK(cs_LastBlockFile);

    return &m_blockfile_info.at(n);
}
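
// A sketch of the undo record layout produced by UndoWriteToDisk() below,
// derived from the code rather than a separately documented format:
//   [4-byte network magic][4-byte record size][serialized CBlockUndo][32-byte checksum]
// where the checksum is the double-SHA256 of the block hash followed by the
// undo data.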
bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const
{
    // Open history file to append
    AutoFile fileout{OpenUndoFile(pos)};
    if (fileout.IsNull()) {
        LogError("%s: OpenUndoFile failed\n", __func__);
        return false;
    }

    // Write index header
    unsigned int nSize = GetSerializeSize(blockundo);
    fileout << GetParams().MessageStart() << nSize;

    // Write undo data
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0) {
        LogError("%s: ftell failed\n", __func__);
        return false;
    }
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    HashWriter hasher{};
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}

bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const
{
    const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};

    if (pos.IsNull()) {
        LogError("%s: no undo data available\n", __func__);
        return false;
    }

    // Open history file to read
    AutoFile filein{OpenUndoFile(pos, true)};
    if (filein.IsNull()) {
        LogError("%s: OpenUndoFile failed\n", __func__);
        return false;
    }

    // Read block
    uint256 hashChecksum;
    HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
    try {
        verifier << index.pprev->GetBlockHash();
        verifier >> blockundo;
        filein >> hashChecksum;
    } catch (const std::exception& e) {
        LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what());
        return false;
    }

    // Verify checksum
    if (hashChecksum != verifier.GetHash()) {
        LogError("%s: Checksum mismatch\n", __func__);
        return false;
    }

    return true;
}

bool BlockManager::FlushUndoFile(int block_file, bool finalize)
{
    FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
    if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
        m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
        return false;
    }
    return true;
}

bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
{
    bool success = true;
    LOCK(cs_LastBlockFile);

    if (m_blockfile_info.size() < 1) {
        // Return if we haven't loaded any blockfiles yet. This happens during
        // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
        // then calls FlushStateToDisk()), resulting in a call to this function before we
        // have populated `m_blockfile_info` via LoadBlockIndexDB().
        return true;
    }
    assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);

    FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
    if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
        m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
        success = false;
    }
    // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
    // e.g. during IBD or a sync after a node going offline
    if (!fFinalize || finalize_undo) {
        if (!FlushUndoFile(blockfile_num, finalize_undo)) {
            success = false;
        }
    }
    return success;
}

BlockfileType BlockManager::BlockfileTypeForHeight(int height)
{
    if (!m_snapshot_height) {
        return BlockfileType::NORMAL;
    }
    return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
}
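
// Illustrative (hypothetical) example for BlockfileTypeForHeight() above: with
// an assumeutxo snapshot loaded at height 840000, a block at height 850000 is
// stored in the ASSUMED block file set, while a height-700000 block fetched by
// the background chainstate goes to the NORMAL set; without a snapshot,
// everything is NORMAL.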

bool BlockManager::FlushChainstateBlockFile(int tip_height)
{
    LOCK(cs_LastBlockFile);
    auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
    // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
    // but no blocks past the snapshot height have been written yet, so there
    // is no data associated with the chainstate, and it is safe not to flush.
    if (cursor) {
        return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
    }
    // No need to log warnings in this case.
    return true;
}

uint64_t BlockManager::CalculateCurrentUsage()
{
    LOCK(cs_LastBlockFile);

    uint64_t retval = 0;
    for (const CBlockFileInfo& file : m_blockfile_info) {
        retval += file.nSize + file.nUndoSize;
    }
    return retval;
}

void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
{
    std::error_code ec;
    for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
        FlatFilePos pos(*it, 0);
        const bool removed_blockfile{fs::remove(BlockFileSeq().FileName(pos), ec)};
        const bool removed_undofile{fs::remove(UndoFileSeq().FileName(pos), ec)};
        if (removed_blockfile || removed_undofile) {
            LogPrint(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
        }
    }
}

FlatFileSeq BlockManager::BlockFileSeq() const
{
    return FlatFileSeq(m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
}

FlatFileSeq BlockManager::UndoFileSeq() const
{
    return FlatFileSeq(m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE);
}

AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
{
    return AutoFile{BlockFileSeq().Open(pos, fReadOnly)};
}

/** Open an undo file (rev?????.dat) */
AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
{
    return AutoFile{UndoFileSeq().Open(pos, fReadOnly)};
}

fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
{
    return BlockFileSeq().FileName(pos);
}

FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
{
    LOCK(cs_LastBlockFile);

    const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);

    if (!m_blockfile_cursors[chain_type]) {
        // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
        assert(chain_type == BlockfileType::ASSUMED);
        const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
        m_blockfile_cursors[chain_type] = new_cursor;
        LogPrint(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
    }
    const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;

    int nFile = last_blockfile;
    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
        m_blockfile_info.resize(nFile + 1);
    }

    bool finalize_undo = false;
    unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
    // Use smaller blockfiles in test-only -fastprune mode - but avoid
    // the possibility of having a block not fit into the block file.
    if (m_opts.fast_prune) {
        max_blockfile_size = 0x10000; // 64kiB
        if (nAddSize >= max_blockfile_size) {
            // dynamically adjust the blockfile size to be larger than the added size
            max_blockfile_size = nAddSize + 1;
        }
    }
    assert(nAddSize < max_blockfile_size);

    while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
        // when the undo file is keeping up with the block file, we want to flush it explicitly
        // when it is lagging behind (more blocks arrive than are being connected), we let the
        // undo block write case handle it
        finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
                         Assert(m_blockfile_cursors[chain_type])->undo_height);

        // Try the next unclaimed blockfile number
        nFile = this->MaxBlockfileNum() + 1;
        // Set to increment MaxBlockfileNum() for next iteration
        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};

        if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
            m_blockfile_info.resize(nFile + 1);
        }
    }
    FlatFilePos pos;
    pos.nFile = nFile;
    pos.nPos = m_blockfile_info[nFile].nSize;

    if (nFile != last_blockfile) {
        LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
                 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);

        // Do not propagate the return code. The flush concerns a previous block
        // and undo file that has already been written to. If a flush fails
        // here, and we crash, there is no expected additional block data
        // inconsistency arising from the flush failure here. However, the undo
        // data may be inconsistent after a crash if the flush is called during
        // a reindex. A flush error might also leave some of the data files
        // untrimmed.
        if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
            LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
                          "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
                          last_blockfile, finalize_undo, nFile);
        }
        // No undo data yet in the new file, so reset our undo-height tracking.
        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
    }

    m_blockfile_info[nFile].AddBlock(nHeight, nTime);
    m_blockfile_info[nFile].nSize += nAddSize;

    bool out_of_space;
    size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
    if (out_of_space) {
        m_opts.notifications.fatalError(_("Disk space is too low!"));
        return {};
    }
    if (bytes_allocated != 0 && IsPruneMode()) {
        m_check_for_pruning = true;
    }

    m_dirty_fileinfo.insert(nFile);
    return pos;
}
|
|
|
|
|
2024-03-20 15:05:08 -04:00
|
|
|
void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
|
|
|
|
{
|
|
|
|
LOCK(cs_LastBlockFile);
|
|
|
|
|
2024-05-09 18:34:14 -04:00
|
|
|
// Update the cursor so it points to the last file.
|
|
|
|
const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
|
|
|
|
auto& cursor{m_blockfile_cursors[chain_type]};
|
|
|
|
if (!cursor || cursor->file_num < pos.nFile) {
|
|
|
|
m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
|
2021-04-18 17:09:48 +02:00
|
|
|
}
|
|
|
|
|
2024-05-09 18:34:14 -04:00
|
|
|
// Update the file information with the current block.
|
2024-03-20 15:05:08 -04:00
|
|
|
const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
|
|
|
|
const int nFile = pos.nFile;
|
|
|
|
if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
|
|
|
|
m_blockfile_info.resize(nFile + 1);
|
|
|
|
}
|
|
|
|
m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
|
|
|
|
m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
|
2022-01-05 15:44:16 +01:00
|
|
|
m_dirty_fileinfo.insert(nFile);
|
2021-04-18 17:09:48 +02:00
|
|
|
}
|
|
|
|
|
2022-01-04 14:04:30 +01:00
|
|
|
bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
|
2021-04-18 17:09:48 +02:00
|
|
|
{
|
|
|
|
pos.nFile = nFile;
|
|
|
|
|
|
|
|
LOCK(cs_LastBlockFile);
|
|
|
|
|
2022-01-05 15:44:16 +01:00
|
|
|
pos.nPos = m_blockfile_info[nFile].nUndoSize;
|
|
|
|
m_blockfile_info[nFile].nUndoSize += nAddSize;
|
|
|
|
m_dirty_fileinfo.insert(nFile);
|
2021-04-18 17:09:48 +02:00
|
|
|
|
|
|
|
bool out_of_space;
|
|
|
|
size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
|
|
|
|
if (out_of_space) {
|
2024-03-15 21:42:44 +01:00
|
|
|
return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
|
2021-04-18 17:09:48 +02:00
|
|
|
}
|
2023-01-03 12:47:38 +01:00
|
|
|
if (bytes_allocated != 0 && IsPruneMode()) {
|
2022-01-05 15:44:16 +01:00
|
|
|
m_check_for_pruning = true;
|
2021-04-18 17:09:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2021-04-02 20:42:05 +02:00
|
|
|
|
2023-07-31 14:31:11 +02:00
|
|
|
bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
|
2021-04-02 20:42:05 +02:00
|
|
|
{
|
|
|
|
// Open history file to append
|
2023-11-15 13:07:28 +10:00
|
|
|
AutoFile fileout{OpenBlockFile(pos)};
|
2021-04-02 19:27:59 +02:00
|
|
|
if (fileout.IsNull()) {
|
2024-01-11 19:43:27 +01:00
|
|
|
LogError("WriteBlockToDisk: OpenBlockFile failed\n");
|
2024-01-11 18:47:54 +01:00
|
|
|
return false;
|
2021-04-02 19:27:59 +02:00
|
|
|
}
|
2021-04-02 20:42:05 +02:00
|
|
|
|
|
|
|
// Write index header
|
2023-09-07 19:16:57 +10:00
|
|
|
unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block));
|
2023-07-31 14:31:11 +02:00
|
|
|
fileout << GetParams().MessageStart() << nSize;
|
2021-04-02 20:42:05 +02:00
|
|
|
|
|
|
|
// Write block
|
|
|
|
long fileOutPos = ftell(fileout.Get());
|
2021-04-02 19:27:59 +02:00
|
|
|
if (fileOutPos < 0) {
|
2024-01-11 19:43:27 +01:00
|
|
|
LogError("WriteBlockToDisk: ftell failed\n");
|
2024-01-11 18:47:54 +01:00
|
|
|
return false;
|
2021-04-02 19:27:59 +02:00
|
|
|
}
|
2021-04-02 20:42:05 +02:00
|
|
|
pos.nPos = (unsigned int)fileOutPos;
|
2023-09-07 19:16:57 +10:00
|
|
|
fileout << TX_WITH_WITNESS(block);
|
2021-04-02 20:42:05 +02:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-05-04 12:40:21 +02:00
|
|
|
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
|
2021-04-18 17:09:48 +02:00
|
|
|
{
|
2021-10-21 16:56:34 +02:00
|
|
|
AssertLockHeld(::cs_main);
|
2023-05-03 14:55:03 -04:00
|
|
|
const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
|
|
|
|
auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
|
|
|
|
|
2021-04-18 17:09:48 +02:00
|
|
|
// Write undo information to disk
|
2023-05-04 12:30:34 +02:00
|
|
|
if (block.GetUndoPos().IsNull()) {
|
2021-04-18 17:09:48 +02:00
|
|
|
FlatFilePos _pos;
|
2023-11-01 22:46:17 +10:00
|
|
|
if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) {
|
2024-01-11 19:43:27 +01:00
|
|
|
LogError("ConnectBlock(): FindUndoPos failed\n");
|
2024-01-11 18:47:54 +01:00
|
|
|
return false;
|
2021-04-19 08:45:35 +02:00
|
|
|
}
|
2023-07-31 14:31:11 +02:00
|
|
|
if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) {
|
2024-03-15 21:42:44 +01:00
|
|
|
return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
|
2021-04-19 08:45:35 +02:00
|
|
|
}
|
2021-04-18 17:09:48 +02:00
|
|
|
// rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
|
|
|
|
// we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
|
|
|
|
// in the block file info as below; note that this does not catch the case where the undo writes are keeping up
|
|
|
|
// with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
|
2024-05-10 15:08:55 -04:00
|
|
|
// the FindNextBlockPos function
|
2023-05-03 14:55:03 -04:00
|
|
|
if (_pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
|
2023-07-25 12:03:26 +02:00
|
|
|
// Do not propagate the return code, a failed flush here should not
|
|
|
|
// be an indication for a failed write. If it were propagated here,
|
|
|
|
// the caller would assume the undo data not to be written, when in
|
|
|
|
// fact it is. Note though, that a failed flush might leave the data
|
|
|
|
// file untrimmed.
|
|
|
|
if (!FlushUndoFile(_pos.nFile, true)) {
|
|
|
|
LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile);
|
|
|
|
}
|
2023-05-03 14:55:03 -04:00
|
|
|
} else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
|
|
|
|
cursor.undo_height = block.nHeight;
|
2021-04-18 17:09:48 +02:00
|
|
|
}
|
|
|
|
// update nUndoPos in block index
|
2023-05-04 12:30:34 +02:00
|
|
|
block.nUndoPos = _pos.nPos;
|
|
|
|
block.nStatus |= BLOCK_HAVE_UNDO;
|
|
|
|
m_dirty_blockindex.insert(&block);
|
2021-04-18 17:09:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) const
{
    block.SetNull();

    // Open history file to read
    AutoFile filein{OpenBlockFile(pos, true)};
    if (filein.IsNull()) {
        LogError("ReadBlockFromDisk: OpenBlockFile failed for %s\n", pos.ToString());
        return false;
    }

    // Read block
    try {
        filein >> TX_WITH_WITNESS(block);
    } catch (const std::exception& e) {
        LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
        return false;
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
        LogError("ReadBlockFromDisk: Errors in block header at %s\n", pos.ToString());
        return false;
    }

    // Signet only: check block solution
    if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
        LogError("ReadBlockFromDisk: Errors in block solution at %s\n", pos.ToString());
        return false;
    }

    return true;
}
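
// Overload taking a block-index entry: the on-disk position is looked up under
// cs_main, and the block read back must hash to the hash recorded in the index.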
bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) const
{
    const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};

    if (!ReadBlockFromDisk(block, block_pos)) {
        return false;
    }
    if (block.GetHash() != index.GetBlockHash()) {
        LogError("ReadBlockFromDisk(CBlock&, const CBlockIndex&): GetHash() doesn't match index for %s at %s\n",
                 index.ToString(), block_pos.ToString());
        return false;
    }
    return true;
}
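
// Read the raw serialized bytes of a block without deserializing it. Each block
// in a blk*.dat file is preceded by an 8-byte header (4 bytes of network magic
// followed by a 4-byte length), so the supplied position is rewound by 8 bytes
// and both fields are validated before the payload is read into the vector.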
bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos) const
{
    FlatFilePos hpos = pos;
    // If nPos is less than 8 the pos is null and we don't have the block data
    // Return early to prevent undefined behavior of unsigned int underflow
    if (hpos.nPos < 8) {
        LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
        return false;
    }
    hpos.nPos -= 8; // Seek back 8 bytes for meta header
    AutoFile filein{OpenBlockFile(hpos, true)};
    if (filein.IsNull()) {
        LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
        return false;
    }

    try {
        MessageStartChars blk_start;
        unsigned int blk_size;

        filein >> blk_start >> blk_size;

        if (blk_start != GetParams().MessageStart()) {
            LogError("%s: Block magic mismatch for %s: %s versus expected %s\n", __func__, pos.ToString(),
                     HexStr(blk_start),
                     HexStr(GetParams().MessageStart()));
            return false;
        }

        if (blk_size > MAX_SIZE) {
            LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s\n", __func__, pos.ToString(),
                     blk_size, MAX_SIZE);
            return false;
        }

        block.resize(blk_size); // Zeroing of memory is intentional here
        filein.read(MakeWritableByteSpan(block));
    } catch (const std::exception& e) {
        LogError("%s: Read from block file failed: %s for %s\n", __func__, e.what(), pos.ToString());
        return false;
    }

    return true;
}
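
// Append a block to the current blk*.dat file. The serialized size plus the
// 8-byte per-block header determines how much space FindNextBlockPos reserves.
// Returns a null position if no space could be found or if the write failed
// (the latter also raises a fatal-error notification).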
FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight)
{
    unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block));
    // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total,
    // defined as BLOCK_SERIALIZATION_HEADER_SIZE)
    nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
    FlatFilePos blockPos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())};
    if (blockPos.IsNull()) {
        LogError("%s: FindNextBlockPos failed\n", __func__);
        return FlatFilePos();
    }
    if (!WriteBlockToDisk(block, blockPos)) {
        m_opts.notifications.fatalError(_("Failed to write block."));
        return FlatFilePos();
    }
    return blockPos;
}
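
// RAII guard that marks the block manager as importing for its lifetime: the
// flag is set in the constructor and cleared in the destructor, so it is reset
// on every exit path out of ImportBlocks() below.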
class ImportingNow
{
    std::atomic<bool>& m_importing;

public:
    ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
    {
        assert(m_importing == false);
        m_importing = true;
    }
    ~ImportingNow()
    {
        assert(m_importing == true);
        m_importing = false;
    }
};
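
// Startup block import: replay existing blk*.dat files if a reindex is in
// progress, then import any files supplied via -loadblock, and finally activate
// the best chain for every chainstate.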
void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFiles)
{
    ImportingNow imp{chainman.m_blockman.m_importing};

    // -reindex
    if (chainman.m_blockman.m_reindexing) {
        int nFile = 0;
        // Map of disk positions for blocks with unknown parent (only used for reindex);
        // parent hash -> child disk position, multiple children can have the same parent.
        std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
        while (true) {
            FlatFilePos pos(nFile, 0);
            if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
                break; // No block files left to reindex
            }
            AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)};
            if (file.IsNull()) {
                break; // This error is logged in OpenBlockFile
            }
            LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
            chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
            if (chainman.m_interrupt) {
                LogPrintf("Interrupt requested. Exit %s\n", __func__);
                return;
            }
            nFile++;
        }
        WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
        chainman.m_blockman.m_reindexing = false;
        LogPrintf("Reindexing finished\n");
        // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
        chainman.ActiveChainstate().LoadGenesisBlock();
    }

    // -loadblock=
    for (const fs::path& path : vImportFiles) {
        AutoFile file{fsbridge::fopen(path, "rb")};
        if (!file.IsNull()) {
            LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
            chainman.LoadExternalBlockFile(file);
            if (chainman.m_interrupt) {
                LogPrintf("Interrupt requested. Exit %s\n", __func__);
                return;
            }
        } else {
            LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
        }
    }

    // scan for better chains in the block chain database that are not yet connected in the active best chain

    // We can't hold cs_main during ActivateBestChain even though we're accessing
    // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
    // the relevant pointers before the ABC call.
    for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
        BlockValidationState state;
        if (!chainstate->ActivateBestChain(state, nullptr)) {
            chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
            return;
        }
    }
    // End scope of ImportingNow
}
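
// Pretty-printers for BlockfileType and BlockfileCursor, useful when logging or
// debugging blockfile state. Illustrative use only: tinyformat routes "%s"
// through operator<<, so a cursor can be logged directly, e.g.
//     LogPrintf("new blockfile cursor: %s\n", cursor);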
std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
    switch (type) {
    case BlockfileType::NORMAL: os << "normal"; break;
    case BlockfileType::ASSUMED: os << "assumed"; break;
    default: os.setstate(std::ios_base::failbit);
    }
    return os;
}

std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
    os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
    return os;
}
} // namespace node