Merge bitcoin/bitcoin#23581: Move BlockManager to node/blockstorage

fa7efc915b Fixup style of moved code (MarcoFalke)
fade2a44f4 Move BlockManager to node/blockstorage (MarcoFalke)

Pull request description:

  `BlockManager` is responsible for reading and writing blocks (and block headers), so move it to the existing `blockstorage` module in `node`. Also, move validation code unrelated to block storage out of `BlockManager`.

ACKs for top commit:
  ryanofsky:
    Code review obvious ACK fa7efc915b

Tree-SHA512: 0197943d818e5f59e743b07fbb92e7661bff90081127a41e35e5692ce49d6f6a7872448670b0da282f7714580a45c8d93e571a67177c8b5f785ce9edefe834c5
commit 2f37b221d1
4 changed files with 455 additions and 453 deletions
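As context for the diff below, here is a minimal caller-side sketch (not part of this commit) of how code might use the relocated BlockManager interface. The GetHeaderHeight helper is a hypothetical example; the include paths and the cs_main locking requirement follow the declarations shown in the diff (LookupBlockIndex() is annotated EXCLUSIVE_LOCKS_REQUIRED(cs_main) in the moved header).

// Hypothetical usage sketch, assuming the post-move layout introduced by this PR.
#include <chain.h>             // CBlockIndex
#include <node/blockstorage.h> // BlockManager and LookupBlockIndex() now live here
#include <sync.h>              // LOCK
#include <uint256.h>
#include <validation.h>        // cs_main

// Return the height of a known block header, or -1 if the hash is not in the index.
static int GetHeaderHeight(const BlockManager& blockman, const uint256& hash)
{
    LOCK(cs_main); // LookupBlockIndex() requires cs_main to be held
    const CBlockIndex* pindex = blockman.LookupBlockIndex(hash);
    return pindex ? pindex->nHeight : -1;
}

Per the description above, the intent of the split is that block and undo storage concerns (reading, writing, and pruning block files) live in node/blockstorage, while validation-only logic stays out of BlockManager.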
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
@@ -12,6 +12,7 @@
 #include <fs.h>
 #include <hash.h>
 #include <pow.h>
+#include <reverse_iterator.h>
 #include <shutdown.h>
 #include <signet.h>
 #include <streams.h>
@@ -47,6 +48,375 @@ static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
 static FlatFileSeq BlockFileSeq();
 static FlatFileSeq UndoFileSeq();
 
+CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
+{
+    AssertLockHeld(cs_main);
+    BlockMap::const_iterator it = m_block_index.find(hash);
+    return it == m_block_index.end() ? nullptr : it->second;
+}
+
+CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
+{
+    AssertLockHeld(cs_main);
+
+    // Check for duplicate
+    uint256 hash = block.GetHash();
+    BlockMap::iterator it = m_block_index.find(hash);
+    if (it != m_block_index.end()) {
+        return it->second;
+    }
+
+    // Construct new block index object
+    CBlockIndex* pindexNew = new CBlockIndex(block);
+    // We assign the sequence id to blocks only when the full data is available,
+    // to avoid miners withholding blocks but broadcasting headers, to get a
+    // competitive advantage.
+    pindexNew->nSequenceId = 0;
+    BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+    pindexNew->phashBlock = &((*mi).first);
+    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
+    if (miPrev != m_block_index.end()) {
+        pindexNew->pprev = (*miPrev).second;
+        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
+        pindexNew->BuildSkip();
+    }
+    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
+    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
+    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
+    if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
+        pindexBestHeader = pindexNew;
+
+    setDirtyBlockIndex.insert(pindexNew);
+
+    return pindexNew;
+}
+
+void BlockManager::PruneOneBlockFile(const int fileNumber)
+{
+    AssertLockHeld(cs_main);
+    LOCK(cs_LastBlockFile);
+
+    for (const auto& entry : m_block_index) {
+        CBlockIndex* pindex = entry.second;
+        if (pindex->nFile == fileNumber) {
+            pindex->nStatus &= ~BLOCK_HAVE_DATA;
+            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
+            pindex->nFile = 0;
+            pindex->nDataPos = 0;
+            pindex->nUndoPos = 0;
+            setDirtyBlockIndex.insert(pindex);
+
+            // Prune from m_blocks_unlinked -- any block we prune would have
+            // to be downloaded again in order to consider its chain, at which
+            // point it would be considered as a candidate for
+            // m_blocks_unlinked or setBlockIndexCandidates.
+            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
+            while (range.first != range.second) {
+                std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
+                range.first++;
+                if (_it->second == pindex) {
+                    m_blocks_unlinked.erase(_it);
+                }
+            }
+        }
+    }
+
+    vinfoBlockFile[fileNumber].SetNull();
+    setDirtyFileInfo.insert(fileNumber);
+}
+
+void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
+{
+    assert(fPruneMode && nManualPruneHeight > 0);
+
+    LOCK2(cs_main, cs_LastBlockFile);
+    if (chain_tip_height < 0) {
+        return;
+    }
+
+    // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
+    unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
+    int count = 0;
+    for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
+        if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
+            continue;
+        }
+        PruneOneBlockFile(fileNumber);
+        setFilesToPrune.insert(fileNumber);
+        count++;
+    }
+    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
+}
+
+void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
+{
+    LOCK2(cs_main, cs_LastBlockFile);
+    if (chain_tip_height < 0 || nPruneTarget == 0) {
+        return;
+    }
+    if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
+        return;
+    }
+
+    unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
+    uint64_t nCurrentUsage = CalculateCurrentUsage();
+    // We don't check to prune until after we've allocated new space for files
+    // So we should leave a buffer under our target to account for another allocation
+    // before the next pruning.
+    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
+    uint64_t nBytesToPrune;
+    int count = 0;
+
+    if (nCurrentUsage + nBuffer >= nPruneTarget) {
+        // On a prune event, the chainstate DB is flushed.
+        // To avoid excessive prune events negating the benefit of high dbcache
+        // values, we should not prune too rapidly.
+        // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
+        if (is_ibd) {
+            // Since this is only relevant during IBD, we use a fixed 10%
+            nBuffer += nPruneTarget / 10;
+        }
+
+        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
+            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
+
+            if (vinfoBlockFile[fileNumber].nSize == 0) {
+                continue;
+            }
+
+            if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
+                break;
+            }
+
+            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
+            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
+                continue;
+            }
+
+            PruneOneBlockFile(fileNumber);
+            // Queue up the files for removal
+            setFilesToPrune.insert(fileNumber);
+            nCurrentUsage -= nBytesToPrune;
+            count++;
+        }
+    }
+
+    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
+             nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
+             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
+             nLastBlockWeCanPrune, count);
+}
+
+CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
+{
+    AssertLockHeld(cs_main);
+
+    if (hash.IsNull()) {
+        return nullptr;
+    }
+
+    // Return existing
+    BlockMap::iterator mi = m_block_index.find(hash);
+    if (mi != m_block_index.end()) {
+        return (*mi).second;
+    }
+
+    // Create new
+    CBlockIndex* pindexNew = new CBlockIndex();
+    mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+    pindexNew->phashBlock = &((*mi).first);
+
+    return pindexNew;
+}
+
+bool BlockManager::LoadBlockIndex(
+    const Consensus::Params& consensus_params,
+    ChainstateManager& chainman)
+{
+    if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
+        return false;
+    }
+
+    // Calculate nChainWork
+    std::vector<std::pair<int, CBlockIndex*>> vSortedByHeight;
+    vSortedByHeight.reserve(m_block_index.size());
+    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
+        CBlockIndex* pindex = item.second;
+        vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
+    }
+    sort(vSortedByHeight.begin(), vSortedByHeight.end());
+
+    // Find start of assumed-valid region.
+    int first_assumed_valid_height = std::numeric_limits<int>::max();
+
+    for (const auto& [height, block] : vSortedByHeight) {
+        if (block->IsAssumedValid()) {
+            auto chainstates = chainman.GetAll();
+
+            // If we encounter an assumed-valid block index entry, ensure that we have
+            // one chainstate that tolerates assumed-valid entries and another that does
+            // not (i.e. the background validation chainstate), since assumed-valid
+            // entries should always be pending validation by a fully-validated chainstate.
+            auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
+            assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
+            assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));
+
+            first_assumed_valid_height = height;
+            break;
+        }
+    }
+
+    for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight) {
+        if (ShutdownRequested()) return false;
+        CBlockIndex* pindex = item.second;
+        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
+        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
+
+        // We can link the chain of blocks for which we've received transactions at some point, or
+        // blocks that are assumed-valid on the basis of snapshot load (see
+        // PopulateAndValidateSnapshot()).
+        // Pruned nodes may have deleted the block.
+        if (pindex->nTx > 0) {
+            if (pindex->pprev) {
+                if (pindex->pprev->nChainTx > 0) {
+                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
+                } else {
+                    pindex->nChainTx = 0;
+                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
+                }
+            } else {
+                pindex->nChainTx = pindex->nTx;
+            }
+        }
+        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
+            pindex->nStatus |= BLOCK_FAILED_CHILD;
+            setDirtyBlockIndex.insert(pindex);
+        }
+        if (pindex->IsAssumedValid() ||
+                (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
+                 (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
+
+            // Fill each chainstate's block candidate set. Only add assumed-valid
+            // blocks to the tip candidate set if the chainstate is allowed to rely on
+            // assumed-valid blocks.
+            //
+            // If all setBlockIndexCandidates contained the assumed-valid blocks, the
+            // background chainstate's ActivateBestChain() call would add assumed-valid
+            // blocks to the chain (based on how FindMostWorkChain() works). Obviously
+            // we don't want this since the purpose of the background validation chain
+            // is to validate assued-valid blocks.
+            //
+            // Note: This is considering all blocks whose height is greater or equal to
+            // the first assumed-valid block to be assumed-valid blocks, and excluding
+            // them from the background chainstate's setBlockIndexCandidates set. This
+            // does mean that some blocks which are not technically assumed-valid
+            // (later blocks on a fork beginning before the first assumed-valid block)
+            // might not get added to the the background chainstate, but this is ok,
+            // because they will still be attached to the active chainstate if they
+            // actually contain more work.
+            //
+            // Instad of this height-based approach, an earlier attempt was made at
+            // detecting "holistically" whether the block index under consideration
+            // relied on an assumed-valid ancestor, but this proved to be too slow to
+            // be practical.
+            for (CChainState* chainstate : chainman.GetAll()) {
+                if (chainstate->reliesOnAssumedValid() ||
+                        pindex->nHeight < first_assumed_valid_height) {
+                    chainstate->setBlockIndexCandidates.insert(pindex);
+                }
+            }
+        }
+        if (pindex->nStatus & BLOCK_FAILED_MASK && (!chainman.m_best_invalid || pindex->nChainWork > chainman.m_best_invalid->nChainWork)) {
+            chainman.m_best_invalid = pindex;
+        }
+        if (pindex->pprev) {
+            pindex->BuildSkip();
+        }
+        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
+            pindexBestHeader = pindex;
+    }
+
+    return true;
+}
+
+void BlockManager::Unload()
+{
+    m_blocks_unlinked.clear();
+
+    for (const BlockMap::value_type& entry : m_block_index) {
+        delete entry.second;
+    }
+
+    m_block_index.clear();
+}
+
+bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
+{
+    if (!LoadBlockIndex(::Params().GetConsensus(), chainman)) {
+        return false;
+    }
+
+    // Load block file info
+    m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
+    vinfoBlockFile.resize(nLastBlockFile + 1);
+    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
+    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
+        m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
+    }
+    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
+    for (int nFile = nLastBlockFile + 1; true; nFile++) {
+        CBlockFileInfo info;
+        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
+            vinfoBlockFile.push_back(info);
+        } else {
+            break;
+        }
+    }
+
+    // Check presence of blk files
+    LogPrintf("Checking all blk files are present...\n");
+    std::set<int> setBlkDataFiles;
+    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
+        CBlockIndex* pindex = item.second;
+        if (pindex->nStatus & BLOCK_HAVE_DATA) {
+            setBlkDataFiles.insert(pindex->nFile);
+        }
+    }
+    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
+        FlatFilePos pos(*it, 0);
+        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
+            return false;
+        }
+    }
+
+    // Check whether we have ever pruned block & undo files
+    m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
+    if (fHavePruned) {
+        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
+    }
+
+    // Check whether we need to continue reindexing
+    bool fReindexing = false;
+    m_block_tree_db->ReadReindexing(fReindexing);
+    if (fReindexing) fReindex = true;
+
+    return true;
+}
+
+CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
+{
+    const MapCheckpoints& checkpoints = data.mapCheckpoints;
+
+    for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) {
+        const uint256& hash = i.second;
+        CBlockIndex* pindex = LookupBlockIndex(hash);
+        if (pindex) {
+            return pindex;
+        }
+    }
+    return nullptr;
+}
+
 bool IsBlockPruned(const CBlockIndex* pblockindex)
 {
     return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
@@ -7,6 +7,7 @@
 
 #include <fs.h>
 #include <protocol.h> // For CMessageHeader::MessageStartChars
+#include <txdb.h>
 
 #include <atomic>
 #include <cstdint>
@@ -20,7 +21,9 @@ class CBlockIndex;
 class CBlockUndo;
 class CChain;
 class CChainParams;
+class CChainState;
 class ChainstateManager;
+struct CCheckpointData;
 struct FlatFilePos;
 namespace Consensus {
 struct Params;
@@ -45,6 +48,87 @@ extern bool fPruneMode;
 /** Number of MiB of block files that we're trying to stay below. */
 extern uint64_t nPruneTarget;
 
+typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
+
+struct CBlockIndexWorkComparator {
+    bool operator()(const CBlockIndex* pa, const CBlockIndex* pb) const;
+};
+
+/**
+ * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
+ * to determine where the most-work tip is.
+ *
+ * This data is used mostly in `CChainState` - information about, e.g.,
+ * candidate tips is not maintained here.
+ */
+class BlockManager
+{
+    friend CChainState;
+
+private:
+    /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
+    void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height);
+
+    /**
+     * Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a user-defined target.
+     * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
+     * space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
+     * (which in this case means the blockchain must be re-downloaded.)
+     *
+     * Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
+     * Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
+     * Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
+     * Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
+     * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
+     * A db flag records the fact that at least some block files have been pruned.
+     *
+     * @param[out]   setFilesToPrune   The set of file indices that can be unlinked will be returned
+     */
+    void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd);
+
+public:
+    BlockMap m_block_index GUARDED_BY(cs_main);
+
+    /**
+     * All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
+     * Pruned nodes may have entries where B is missing data.
+     */
+    std::multimap<CBlockIndex*, CBlockIndex*> m_blocks_unlinked;
+
+    std::unique_ptr<CBlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);
+
+    bool LoadBlockIndexDB(ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
+    /**
+     * Load the blocktree off disk and into memory. Populate certain metadata
+     * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
+     * collections like setDirtyBlockIndex.
+     */
+    bool LoadBlockIndex(
+        const Consensus::Params& consensus_params,
+        ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    /** Clear all data members. */
+    void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+    /** Create a new block index entry for a given block hash */
+    CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    //! Mark one block file as pruned (modify associated database entries)
+    void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    CBlockIndex* LookupBlockIndex(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    //! Returns last CBlockIndex* that is a checkpoint
+    CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    ~BlockManager()
+    {
+        Unload();
+    }
+};
+
 //! Check whether the block associated with this index entry is pruned or not.
 bool IsBlockPruned(const CBlockIndex* pblockindex);
diff --git a/src/validation.cpp b/src/validation.cpp
@@ -143,13 +143,6 @@ extern std::set<int> setDirtyFileInfo;
 void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
 // ... TODO move fully to blockstorage
 
-CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
-{
-    AssertLockHeld(cs_main);
-    BlockMap::const_iterator it = m_block_index.find(hash);
-    return it == m_block_index.end() ? nullptr : it->second;
-}
-
 CBlockIndex* CChainState::FindForkInGlobalIndex(const CBlockLocator& locator) const
 {
     AssertLockHeld(cs_main);
@@ -3125,42 +3118,6 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
     }
 }
 
-CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
-{
-    AssertLockHeld(cs_main);
-
-    // Check for duplicate
-    uint256 hash = block.GetHash();
-    BlockMap::iterator it = m_block_index.find(hash);
-    if (it != m_block_index.end())
-        return it->second;
-
-    // Construct new block index object
-    CBlockIndex* pindexNew = new CBlockIndex(block);
-    // We assign the sequence id to blocks only when the full data is available,
-    // to avoid miners withholding blocks but broadcasting headers, to get a
-    // competitive advantage.
-    pindexNew->nSequenceId = 0;
-    BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
-    pindexNew->phashBlock = &((*mi).first);
-    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
-    if (miPrev != m_block_index.end())
-    {
-        pindexNew->pprev = (*miPrev).second;
-        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
-        pindexNew->BuildSkip();
-    }
-    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
-    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
-    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
-    if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
-        pindexBestHeader = pindexNew;
-
-    setDirtyBlockIndex.insert(pindexNew);
-
-    return pindexNew;
-}
-
 /** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
 void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
 {
@@ -3327,21 +3284,6 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
     return commitment;
 }
 
-CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
-{
-    const MapCheckpoints& checkpoints = data.mapCheckpoints;
-
-    for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
-    {
-        const uint256& hash = i.second;
-        CBlockIndex* pindex = LookupBlockIndex(hash);
-        if (pindex) {
-            return pindex;
-        }
-    }
-    return nullptr;
-}
-
 /** Context-dependent validity checks.
  * By "context", we mean only the previous block headers, but not the UTXO
  * set; UTXO-related validity checks are done in ConnectBlock().
@@ -3763,67 +3705,6 @@ bool TestBlockValidity(BlockValidationState& state,
     return true;
 }
 
-/**
- * BLOCK PRUNING CODE
- */
-
-void BlockManager::PruneOneBlockFile(const int fileNumber)
-{
-    AssertLockHeld(cs_main);
-    LOCK(cs_LastBlockFile);
-
-    for (const auto& entry : m_block_index) {
-        CBlockIndex* pindex = entry.second;
-        if (pindex->nFile == fileNumber) {
-            pindex->nStatus &= ~BLOCK_HAVE_DATA;
-            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
-            pindex->nFile = 0;
-            pindex->nDataPos = 0;
-            pindex->nUndoPos = 0;
-            setDirtyBlockIndex.insert(pindex);
-
-            // Prune from m_blocks_unlinked -- any block we prune would have
-            // to be downloaded again in order to consider its chain, at which
-            // point it would be considered as a candidate for
-            // m_blocks_unlinked or setBlockIndexCandidates.
-            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
-            while (range.first != range.second) {
-                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
-                range.first++;
-                if (_it->second == pindex) {
-                    m_blocks_unlinked.erase(_it);
-                }
-            }
-        }
-    }
-
-    vinfoBlockFile[fileNumber].SetNull();
-    setDirtyFileInfo.insert(fileNumber);
-}
-
-void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
-{
-    assert(fPruneMode && nManualPruneHeight > 0);
-
-    LOCK2(cs_main, cs_LastBlockFile);
-    if (chain_tip_height < 0) {
-        return;
-    }
-
-    // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
-    unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
-    int count = 0;
-    for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
-        if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
-            continue;
-        }
-        PruneOneBlockFile(fileNumber);
-        setFilesToPrune.insert(fileNumber);
-        count++;
-    }
-    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
-}
-
 /* This function is called from the RPC code for pruneblockchain */
 void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
 {
@@ -3834,259 +3715,6 @@ void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
     }
 }
 
-void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
-{
-    LOCK2(cs_main, cs_LastBlockFile);
-    if (chain_tip_height < 0 || nPruneTarget == 0) {
-        return;
-    }
-    if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
-        return;
-    }
-
-    unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
-    uint64_t nCurrentUsage = CalculateCurrentUsage();
-    // We don't check to prune until after we've allocated new space for files
-    // So we should leave a buffer under our target to account for another allocation
-    // before the next pruning.
-    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
-    uint64_t nBytesToPrune;
-    int count = 0;
-
-    if (nCurrentUsage + nBuffer >= nPruneTarget) {
-        // On a prune event, the chainstate DB is flushed.
-        // To avoid excessive prune events negating the benefit of high dbcache
-        // values, we should not prune too rapidly.
-        // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
-        if (is_ibd) {
-            // Since this is only relevant during IBD, we use a fixed 10%
-            nBuffer += nPruneTarget / 10;
-        }
-
-        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
-            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
-
-            if (vinfoBlockFile[fileNumber].nSize == 0) {
-                continue;
-            }
-
-            if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
-                break;
-            }
-
-            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
-            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
-                continue;
-            }
-
-            PruneOneBlockFile(fileNumber);
-            // Queue up the files for removal
-            setFilesToPrune.insert(fileNumber);
-            nCurrentUsage -= nBytesToPrune;
-            count++;
-        }
-    }
-
-    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
-             nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
-             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
-             nLastBlockWeCanPrune, count);
-}
-
-CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
-{
-    AssertLockHeld(cs_main);
-
-    if (hash.IsNull())
-        return nullptr;
-
-    // Return existing
-    BlockMap::iterator mi = m_block_index.find(hash);
-    if (mi != m_block_index.end())
-        return (*mi).second;
-
-    // Create new
-    CBlockIndex* pindexNew = new CBlockIndex();
-    mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
-    pindexNew->phashBlock = &((*mi).first);
-
-    return pindexNew;
-}
-
-bool BlockManager::LoadBlockIndex(
-    const Consensus::Params& consensus_params,
-    ChainstateManager& chainman)
-{
-    if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
-        return false;
-    }
-
-    // Calculate nChainWork
-    std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
-    vSortedByHeight.reserve(m_block_index.size());
-    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
-    {
-        CBlockIndex* pindex = item.second;
-        vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
-    }
-    sort(vSortedByHeight.begin(), vSortedByHeight.end());
-
-    // Find start of assumed-valid region.
-    int first_assumed_valid_height = std::numeric_limits<int>::max();
-
-    for (const auto& [height, block] : vSortedByHeight) {
-        if (block->IsAssumedValid()) {
-            auto chainstates = chainman.GetAll();
-
-            // If we encounter an assumed-valid block index entry, ensure that we have
-            // one chainstate that tolerates assumed-valid entries and another that does
-            // not (i.e. the background validation chainstate), since assumed-valid
-            // entries should always be pending validation by a fully-validated chainstate.
-            auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
-            assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
-            assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));
-
-            first_assumed_valid_height = height;
-            break;
-        }
-    }
-
-    for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
-    {
-        if (ShutdownRequested()) return false;
-        CBlockIndex* pindex = item.second;
-        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
-        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
-
-        // We can link the chain of blocks for which we've received transactions at some point, or
-        // blocks that are assumed-valid on the basis of snapshot load (see
-        // PopulateAndValidateSnapshot()).
-        // Pruned nodes may have deleted the block.
-        if (pindex->nTx > 0) {
-            if (pindex->pprev) {
-                if (pindex->pprev->nChainTx > 0) {
-                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
-                } else {
-                    pindex->nChainTx = 0;
-                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
-                }
-            } else {
-                pindex->nChainTx = pindex->nTx;
-            }
-        }
-        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
-            pindex->nStatus |= BLOCK_FAILED_CHILD;
-            setDirtyBlockIndex.insert(pindex);
-        }
-        if (pindex->IsAssumedValid() ||
-                (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
-                 (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
-
-            // Fill each chainstate's block candidate set. Only add assumed-valid
-            // blocks to the tip candidate set if the chainstate is allowed to rely on
-            // assumed-valid blocks.
-            //
-            // If all setBlockIndexCandidates contained the assumed-valid blocks, the
-            // background chainstate's ActivateBestChain() call would add assumed-valid
-            // blocks to the chain (based on how FindMostWorkChain() works). Obviously
-            // we don't want this since the purpose of the background validation chain
-            // is to validate assued-valid blocks.
-            //
-            // Note: This is considering all blocks whose height is greater or equal to
-            // the first assumed-valid block to be assumed-valid blocks, and excluding
-            // them from the background chainstate's setBlockIndexCandidates set. This
-            // does mean that some blocks which are not technically assumed-valid
-            // (later blocks on a fork beginning before the first assumed-valid block)
-            // might not get added to the the background chainstate, but this is ok,
-            // because they will still be attached to the active chainstate if they
-            // actually contain more work.
-            //
-            // Instad of this height-based approach, an earlier attempt was made at
-            // detecting "holistically" whether the block index under consideration
-            // relied on an assumed-valid ancestor, but this proved to be too slow to
-            // be practical.
-            for (CChainState* chainstate : chainman.GetAll()) {
-                if (chainstate->reliesOnAssumedValid() ||
-                        pindex->nHeight < first_assumed_valid_height) {
-                    chainstate->setBlockIndexCandidates.insert(pindex);
-                }
-            }
-        }
-        if (pindex->nStatus & BLOCK_FAILED_MASK && (!chainman.m_best_invalid || pindex->nChainWork > chainman.m_best_invalid->nChainWork)) {
-            chainman.m_best_invalid = pindex;
-        }
-        if (pindex->pprev)
-            pindex->BuildSkip();
-        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
-            pindexBestHeader = pindex;
-    }
-
-    return true;
-}
-
-void BlockManager::Unload() {
-    m_blocks_unlinked.clear();
-
-    for (const BlockMap::value_type& entry : m_block_index) {
-        delete entry.second;
-    }
-
-    m_block_index.clear();
-}
-
-bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
-{
-    if (!LoadBlockIndex(::Params().GetConsensus(), chainman)) {
-        return false;
-    }
-
-    // Load block file info
-    m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
-    vinfoBlockFile.resize(nLastBlockFile + 1);
-    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
-    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
-        m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
-    }
-    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
-    for (int nFile = nLastBlockFile + 1; true; nFile++) {
-        CBlockFileInfo info;
-        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
-            vinfoBlockFile.push_back(info);
-        } else {
-            break;
-        }
-    }
-
-    // Check presence of blk files
-    LogPrintf("Checking all blk files are present...\n");
-    std::set<int> setBlkDataFiles;
-    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
-        CBlockIndex* pindex = item.second;
-        if (pindex->nStatus & BLOCK_HAVE_DATA) {
-            setBlkDataFiles.insert(pindex->nFile);
-        }
-    }
-    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
-    {
-        FlatFilePos pos(*it, 0);
-        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
-            return false;
-        }
-    }
-
-    // Check whether we have ever pruned block & undo files
-    m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
-    if (fHavePruned)
-        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
-
-    // Check whether we need to continue reindexing
-    bool fReindexing = false;
-    m_block_tree_db->ReadReindexing(fReindexing);
-    if(fReindexing) fReindex = true;
-
-    return true;
-}
-
 void CChainState::LoadMempool(const ArgsManager& args)
 {
     if (!m_mempool) return;
diff --git a/src/validation.h b/src/validation.h
@@ -15,6 +15,7 @@
 #include <chain.h>
 #include <consensus/amount.h>
 #include <fs.h>
+#include <node/blockstorage.h>
 #include <policy/feerate.h>
 #include <policy/packages.h>
 #include <script/script_error.h>
@@ -40,7 +41,6 @@
 class CChainState;
 class CBlockTreeDB;
 class CChainParams;
-struct CCheckpointData;
 class CTxMemPool;
 class ChainstateManager;
 class SnapshotMetadata;
@@ -107,7 +107,6 @@ enum class SynchronizationState {
 };
 
 extern RecursiveMutex cs_main;
-typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
 extern Mutex g_best_block_mutex;
 extern std::condition_variable g_best_block_cv;
 /** Used to notify getblocktemplate RPC of new tips. */
@@ -381,85 +380,6 @@ enum class FlushStateMode {
     ALWAYS
 };
 
-struct CBlockIndexWorkComparator
-{
-    bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const;
-};
-
-/**
- * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
- * to determine where the most-work tip is.
- *
- * This data is used mostly in `CChainState` - information about, e.g.,
- * candidate tips is not maintained here.
- */
-class BlockManager
-{
-    friend CChainState;
-
-private:
-    /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
-    void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height);
-
-    /**
-     * Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a user-defined target.
-     * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
-     * space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
-     * (which in this case means the blockchain must be re-downloaded.)
-     *
-     * Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
-     * Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
-     * Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
-     * Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
-     * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
-     * A db flag records the fact that at least some block files have been pruned.
-     *
-     * @param[out]   setFilesToPrune   The set of file indices that can be unlinked will be returned
-     */
-    void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd);
-
-public:
-    BlockMap m_block_index GUARDED_BY(cs_main);
-
-    /**
-     * All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
-     * Pruned nodes may have entries where B is missing data.
-     */
-    std::multimap<CBlockIndex*, CBlockIndex*> m_blocks_unlinked;
-
-    std::unique_ptr<CBlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);
-
-    bool LoadBlockIndexDB(ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
-
-    /**
-     * Load the blocktree off disk and into memory. Populate certain metadata
-     * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
-     * collections like setDirtyBlockIndex.
-     */
-    bool LoadBlockIndex(
-        const Consensus::Params& consensus_params,
-        ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
-    /** Clear all data members. */
-    void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
-    CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-    /** Create a new block index entry for a given block hash */
-    CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
-    //! Mark one block file as pruned (modify associated database entries)
-    void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
-    CBlockIndex* LookupBlockIndex(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
-    //! Returns last CBlockIndex* that is a checkpoint
-    CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
-    ~BlockManager() {
-        Unload();
-    }
-};
-
 /**
  * A convenience class for constructing the CCoinsView* hierarchy used
  * to facilitate access to the UTXO set.