Move headers-direct-fetch logic into own function
parent bf8ea6df75
commit 29c4518522

1 changed file with 72 additions and 54 deletions
@@ -569,6 +569,8 @@ private:
     bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
     /** Request further headers from this peer from a given block header */
     void FetchMoreHeaders(CNode& pfrom, const CBlockIndex *pindexLast, const Peer& peer);
+    /** Potentially fetch blocks from this peer upon receipt of new headers tip */
+    void HeadersDirectFetchBlocks(CNode& pfrom, const CBlockIndex* pindexLast);
 
     void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
 
@@ -2276,6 +2278,73 @@ void PeerManagerImpl::FetchMoreHeaders(CNode& pfrom, const CBlockIndex *pindexLast, const Peer& peer)
     m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
 }
 
+/*
+ * Given a new headers tip ending in pindexLast, potentially request blocks towards that tip.
+ * We require that the given tip have at least as much work as our tip, and for
+ * our current tip to be "close to synced" (see CanDirectFetch()).
+ */
+void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const CBlockIndex* pindexLast)
+{
+    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
+
+    LOCK(cs_main);
+    CNodeState *nodestate = State(pfrom.GetId());
+
+    if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
+
+        std::vector<const CBlockIndex*> vToFetch;
+        const CBlockIndex *pindexWalk = pindexLast;
+        // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+        while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+            if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
+                    !IsBlockRequested(pindexWalk->GetBlockHash()) &&
+                    (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
+                // We don't have this block, and it's not yet in flight.
+                vToFetch.push_back(pindexWalk);
+            }
+            pindexWalk = pindexWalk->pprev;
+        }
+        // If pindexWalk still isn't on our main chain, we're looking at a
+        // very large reorg at a time we think we're close to caught up to
+        // the main chain -- this shouldn't really happen. Bail out on the
+        // direct fetch and rely on parallel download instead.
+        if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
+            LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
+                    pindexLast->GetBlockHash().ToString(),
+                    pindexLast->nHeight);
+        } else {
+            std::vector<CInv> vGetData;
+            // Download as much as possible, from earliest to latest.
+            for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
+                if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+                    // Can't download any more from this peer
+                    break;
+                }
+                uint32_t nFetchFlags = GetFetchFlags(pfrom);
+                vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+                BlockRequested(pfrom.GetId(), *pindex);
+                LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
+                        pindex->GetBlockHash().ToString(), pfrom.GetId());
+            }
+            if (vGetData.size() > 1) {
+                LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
+                        pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+            }
+            if (vGetData.size() > 0) {
+                if (!m_ignore_incoming_txs &&
+                        nodestate->m_provides_cmpctblocks &&
+                        vGetData.size() == 1 &&
+                        mapBlocksInFlight.size() == 1 &&
+                        pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+                    // In any case, we want to download using a compact block, not a regular one
+                    vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
+                }
+                m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+            }
+        }
+    }
+}
+
 void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
                                             const std::vector<CBlockHeader>& headers,
                                             bool via_compact_block)
@@ -2355,60 +2424,9 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
         nodestate->m_last_block_announcement = GetTime();
     }
 
-    // If this set of headers is valid and ends in a block with at least as
-    // much work as our tip, download as much as possible.
-    if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
-        std::vector<const CBlockIndex*> vToFetch;
-        const CBlockIndex *pindexWalk = pindexLast;
-        // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
-        while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
-            if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
-                    !IsBlockRequested(pindexWalk->GetBlockHash()) &&
-                    (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
-                // We don't have this block, and it's not yet in flight.
-                vToFetch.push_back(pindexWalk);
-            }
-            pindexWalk = pindexWalk->pprev;
-        }
-        // If pindexWalk still isn't on our main chain, we're looking at a
-        // very large reorg at a time we think we're close to caught up to
-        // the main chain -- this shouldn't really happen. Bail out on the
-        // direct fetch and rely on parallel download instead.
-        if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
-            LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
-                    pindexLast->GetBlockHash().ToString(),
-                    pindexLast->nHeight);
-        } else {
-            std::vector<CInv> vGetData;
-            // Download as much as possible, from earliest to latest.
-            for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
-                if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
-                    // Can't download any more from this peer
-                    break;
-                }
-                uint32_t nFetchFlags = GetFetchFlags(pfrom);
-                vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
-                BlockRequested(pfrom.GetId(), *pindex);
-                LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
-                        pindex->GetBlockHash().ToString(), pfrom.GetId());
-            }
-            if (vGetData.size() > 1) {
-                LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
-                        pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
-            }
-            if (vGetData.size() > 0) {
-                if (!m_ignore_incoming_txs &&
-                        nodestate->m_provides_cmpctblocks &&
-                        vGetData.size() == 1 &&
-                        mapBlocksInFlight.size() == 1 &&
-                        pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
-                    // In any case, we want to download using a compact block, not a regular one
-                    vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
-                }
-                m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
-            }
-        }
-    }
+    // Consider immediately downloading blocks.
+    HeadersDirectFetchBlocks(pfrom, pindexLast);
+
     // If we're in IBD, we want outbound peers that will serve us a useful
     // chain. Disconnect peers that are on chains with insufficient work.
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
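
For orientation, the following is a minimal standalone sketch of the direct-fetch pattern that HeadersDirectFetchBlocks() encapsulates: walk back from the announced tip, collect ancestors we do not yet have (up to a per-peer limit), bail out if the walk never reaches our active chain, and otherwise request the collected blocks from earliest to latest. It is an illustration only, written against simplified stand-in types: Block, ActiveChainContains() and RequestBlock() are hypothetical and are not Bitcoin Core APIs.

// Simplified sketch of the headers-direct-fetch walk (illustration, not Bitcoin Core code).
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Block {
    int height{0};
    bool have_data{false};      // do we already have the full block?
    const Block* prev{nullptr}; // parent block (nullptr at genesis)
};

constexpr std::size_t MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;

// Hypothetical stand-ins for m_chainman.ActiveChain().Contains() and the
// block-request bookkeeping used in the real code.
bool ActiveChainContains(const Block* block) { return block && block->height <= 100; }
void RequestBlock(const Block* block) { std::printf("requesting block at height %d\n", block->height); }

void DirectFetchBlocks(const Block* tip)
{
    // Walk back from the announced tip, collecting blocks we still need,
    // up to a per-peer limit.
    std::vector<const Block*> to_fetch;
    const Block* walk = tip;
    while (walk && !ActiveChainContains(walk) && to_fetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
        if (!walk->have_data) to_fetch.push_back(walk);
        walk = walk->prev;
    }
    // If the walk never reached the active chain, this is a very large reorg;
    // skip the direct fetch and rely on parallel download instead.
    if (!ActiveChainContains(walk)) return;
    // Request from earliest to latest so blocks can connect as they arrive.
    std::reverse(to_fetch.begin(), to_fetch.end());
    for (const Block* block : to_fetch) RequestBlock(block);
}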