diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index e31ca81a08..27c426b811 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -661,9 +661,9 @@ private:
      */
     bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
     /** Potentially fetch blocks from this peer upon receipt of a new headers tip */
-    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast);
+    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
     /** Update peer state based on received headers message */
-    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers);
+    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers);
 
     void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
 
@@ -2622,22 +2622,21 @@ bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& loc
 }
 
 /*
- * Given a new headers tip ending in pindexLast, potentially request blocks towards that tip.
+ * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
  * We require that the given tip have at least as much work as our tip, and for
  * our current tip to be "close to synced" (see CanDirectFetch()).
  */
-void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast)
+void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
 {
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
     LOCK(cs_main);
     CNodeState *nodestate = State(pfrom.GetId());
 
-    if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
-
+    if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
         std::vector<const CBlockIndex*> vToFetch;
-        const CBlockIndex *pindexWalk = pindexLast;
-        // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+        const CBlockIndex* pindexWalk{&last_header};
+        // Calculate all the blocks we'd need to switch to last_header, up to a limit.
         while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                     !IsBlockRequested(pindexWalk->GetBlockHash()) &&
@@ -2653,8 +2652,8 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
         // direct fetch and rely on parallel download instead.
         if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
             LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
-                    pindexLast->GetBlockHash().ToString(),
-                    pindexLast->nHeight);
+                    last_header.GetBlockHash().ToString(),
+                    last_header.nHeight);
         } else {
             std::vector<CInv> vGetData;
             // Download as much as possible, from earliest to latest.
@@ -2671,14 +2670,15 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
             }
             if (vGetData.size() > 1) {
                 LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
-                        pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+                        last_header.GetBlockHash().ToString(),
+                        last_header.nHeight);
             }
             if (vGetData.size() > 0) {
                 if (!m_ignore_incoming_txs &&
                         nodestate->m_provides_cmpctblocks &&
                         vGetData.size() == 1 &&
                         mapBlocksInFlight.size() == 1 &&
-                        pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+                        last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
                     // In any case, we want to download using a compact block, not a regular one
                     vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                 }
@@ -2689,12 +2689,12 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
 }
 
 /**
- * Given receipt of headers from a peer ending in pindexLast, along with
+ * Given receipt of headers from a peer ending in last_header, along with
  * whether that header was new and whether the headers message was full,
  * update the state we keep for the peer.
  */
 void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
-        const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers)
+        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
 {
     LOCK(cs_main);
     CNodeState *nodestate = State(pfrom.GetId());
@@ -2703,14 +2703,13 @@ void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
     }
     nodestate->nUnconnectingHeaders = 0;
 
-    assert(pindexLast);
-    UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
+    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());
 
     // From here, pindexBestKnownBlock should be guaranteed to be non-null,
    // because it is set in UpdateBlockAvailability. Some nullptr checks
     // are still present, however, as belt-and-suspenders.
 
-    if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
+    if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
         nodestate->m_last_block_announcement = GetTime();
     }
 
@@ -2876,7 +2875,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
             return;
         }
     }
-    Assume(pindexLast);
+    assert(pindexLast);
 
     // Consider fetching more headers if we are not using our headers-sync mechanism.
     if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
@@ -2887,10 +2886,10 @@
         }
     }
 
-    UpdatePeerStateForReceivedHeaders(pfrom, pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+    UpdatePeerStateForReceivedHeaders(pfrom, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
 
     // Consider immediately downloading blocks.
-    HeadersDirectFetchBlocks(pfrom, peer, pindexLast);
+    HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
 
     return;
 }
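For context, here is a standalone sketch of the pattern this diff applies: the caller keeps the single null check (the assert in ProcessHeadersMessage) and dereferences once, while the callees take a reference so they can no longer receive null. The sketch is not part of the patch; the Header type and the ProcessLastHeader/HandleHeaders names below are hypothetical simplifications, not Bitcoin Core APIs.

// Standalone illustrative sketch (hypothetical types and names, not part of the patch).
#include <cassert>
#include <iostream>

struct Header {
    int height{0};
};

// Callee takes a reference: no null check needed, mirroring how
// UpdatePeerStateForReceivedHeaders and HeadersDirectFetchBlocks look after the patch.
void ProcessLastHeader(const Header& last_header)
{
    std::cout << "last header height: " << last_header.height << '\n';
}

// Caller owns the only null check, mirroring the assert on pindexLast
// before the *pindexLast call sites in ProcessHeadersMessage.
void HandleHeaders(const Header* last)
{
    assert(last);               // fail fast if the non-null invariant is broken
    ProcessLastHeader(*last);   // dereference once, then pass by reference
}

int main()
{
    Header h{800000};
    HandleHeaders(&h);
}

The design point is that the non-null invariant is encoded in the parameter type itself, so the internal assert and the belt-and-suspenders pointer checks can be dropped from the callees.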