Merge bitcoin/bitcoin#25960: p2p: Headers-sync followups

94af3e43e2 Fix typo from PR25717 (Suhas Daftuar)
e5982ecdc4 Bypass headers anti-DoS checks for NoBan peers (Suhas Daftuar)
132ed7eaaa Move headerssync logging to BCLog::NET (Suhas Daftuar)

Pull request description:

  Remove BCLog::HEADERSSYNC and move all headerssync logging to BCLog::NET.

  Bypass headers anti-DoS checks for NoBan peers.

  Also fix a typo that was introduced in PR25717.

ACKs for top commit:
  Sjors: tACK 94af3e43e2
  ajtowns: ACK 94af3e43e2
  sipa: ACK 94af3e43e2
  naumenkogs: ACK 94af3e43e2
  w0xlt: ACK 94af3e43e2

Tree-SHA512: 612d594eddace977359bcc8234b2093d273fd50662f4ac70cb90903d28fb831f6e1aecff51a4ef6c0bb0f6fb5d1aa7ff1eb8798fac5ac142783788f3080717dc
commit 6ab84709fc

6 changed files with 46 additions and 23 deletions
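Note: with the dedicated headerssync log category removed, the messages touched in the hunks below are emitted under the net category instead. As a usage sketch (these are standard bitcoind options, though exact output depends on your version), the category can be enabled at startup or toggled at runtime:

    bitcoind -debug=net
    bitcoin-cli logging '["net"]'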
@@ -42,7 +42,7 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus
     // could try again, if necessary, to sync a longer chain).
     m_max_commitments = 6*(Ticks<std::chrono::seconds>(GetAdjustedTime() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD;
 
-    LogPrint(BCLog::HEADERSSYNC, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
+    LogPrint(BCLog::NET, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
 }
 
 /** Free any memory in use, and mark this object as no longer usable. This is
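For a sense of scale of m_max_commitments above, here is a standalone back-of-the-envelope computation. The period and maximum-future-time constants are illustrative assumptions for this example, not necessarily the values compiled into Bitcoin Core, and the factor of 6 simply mirrors the bound in the formula above (roughly the highest header rate the timestamp rules allow):

    #include <cstdint>
    #include <iostream>

    int main()
    {
        // Illustrative inputs only: a headers sync starting from a point whose
        // median-time-past is about one year in the past.
        const int64_t seconds_since_chain_start = 365LL * 24 * 60 * 60;
        const int64_t max_future_block_time = 2 * 60 * 60; // assumed 2 hours
        const int64_t header_commitment_period = 600;      // assumed period, for illustration
        const int64_t max_commitments =
            6 * (seconds_since_chain_start + max_future_block_time) / header_commitment_period;
        std::cout << max_commitments << '\n'; // ~315,000 commitments for these example values
    }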
@@ -92,7 +92,7 @@ HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const
                 // If we're in PRESYNC and we get a non-full headers
                 // message, then the peer's chain has ended and definitely doesn't
                 // have enough work, so we can stop our sync.
-                LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (presync phase)\n", m_id, m_current_height);
+                LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (presync phase)\n", m_id, m_current_height);
             }
         }
     } else if (m_download_state == State::REDOWNLOAD) {
@@ -118,7 +118,7 @@ HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const
             // If we hit our target blockhash, then all remaining headers will be
             // returned and we can clear any leftover internal state.
             if (m_redownloaded_headers.empty() && m_process_all_remaining_headers) {
-                LogPrint(BCLog::HEADERSSYNC, "Initial headers sync complete with peer=%d: releasing all at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
+                LogPrint(BCLog::NET, "Initial headers sync complete with peer=%d: releasing all at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
             } else if (full_headers_message) {
                 // If the headers message is full, we need to request more.
                 ret.request_more = true;
@@ -127,7 +127,7 @@ HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const
                 // declining to serve us that full chain again. Give up.
                 // Note that there's no more processing to be done with these
                 // headers, so we can still return success.
-                LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
+                LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
             }
         }
     }
@@ -150,7 +150,7 @@ bool HeadersSyncState::ValidateAndStoreHeadersCommitments(const std::vector<CBlo
         // This might be benign -- perhaps our peer reorged away from the chain
         // they were on. Give up on this sync for now (likely we will start a
         // new sync with a new starting point).
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (presync phase)\n", m_id, m_current_height);
+        LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (presync phase)\n", m_id, m_current_height);
         return false;
     }
 
@@ -169,7 +169,7 @@ bool HeadersSyncState::ValidateAndStoreHeadersCommitments(const std::vector<CBlo
         m_redownload_buffer_last_hash = m_chain_start->GetBlockHash();
         m_redownload_chain_work = m_chain_start->nChainWork;
         m_download_state = State::REDOWNLOAD;
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync transition with peer=%d: reached sufficient work at height=%i, redownloading from height=%i\n", m_id, m_current_height, m_redownload_buffer_last_height);
+        LogPrint(BCLog::NET, "Initial headers sync transition with peer=%d: reached sufficient work at height=%i, redownloading from height=%i\n", m_id, m_current_height, m_redownload_buffer_last_height);
     }
     return true;
 }
@@ -188,7 +188,7 @@ bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& curren
     // adjustment maximum.
     if (!PermittedDifficultyTransition(m_consensus_params, next_height,
                 m_last_header_received.nBits, current.nBits)) {
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (presync phase)\n", m_id, next_height);
+        LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (presync phase)\n", m_id, next_height);
         return false;
     }
 
@@ -200,7 +200,7 @@ bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& curren
             // It's possible the chain grew since we started the sync; so
             // potentially we could succeed in syncing the peer's chain if we
             // try again later.
-            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: exceeded max commitments at height=%i (presync phase)\n", m_id, next_height);
+            LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: exceeded max commitments at height=%i (presync phase)\n", m_id, next_height);
             return false;
         }
     }
@@ -222,7 +222,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
     // Ensure that we're working on a header that connects to the chain we're
     // downloading.
     if (header.hashPrevBlock != m_redownload_buffer_last_hash) {
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (redownload phase)\n", m_id, next_height);
+        LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (redownload phase)\n", m_id, next_height);
         return false;
     }
 
@@ -236,7 +236,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
 
     if (!PermittedDifficultyTransition(m_consensus_params, next_height,
                 previous_nBits, header.nBits)) {
-        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (redownload phase)\n", m_id, next_height);
+        LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (redownload phase)\n", m_id, next_height);
         return false;
     }
 
@@ -255,7 +255,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
     // target blockhash just because we ran out of commitments.
     if (!m_process_all_remaining_headers && next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) {
         if (m_header_commitments.size() == 0) {
-            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height);
+            LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height);
             // Somehow our peer managed to feed us a different chain and
             // we've run out of commitments.
             return false;
@@ -264,7 +264,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
         bool expected_commitment = m_header_commitments.front();
         m_header_commitments.pop_front();
         if (commitment != expected_commitment) {
-            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: commitment mismatch at height=%i (redownload phase)\n", m_id, next_height);
+            LogPrint(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment mismatch at height=%i (redownload phase)\n", m_id, next_height);
             return false;
         }
     }
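The "commitment overrun" and "commitment mismatch" aborts in the two hunks above come from the presync/redownload commitment scheme: during the first pass one salted hash bit is remembered per commitment period, and during redownload each bit is recomputed and compared. The following standalone sketch illustrates only that store-and-compare idea; the hash function, salt, period and data types here are placeholders, not the HeadersSyncState implementation:

    #include <cstdint>
    #include <deque>
    #include <functional>
    #include <string>

    // One bit of commitment per sampled header, derived from a salted hash
    // (placeholder hash; the real code uses a dedicated salted hasher).
    static bool CommitmentBit(uint64_t salt, const std::string& header_hash)
    {
        return (std::hash<std::string>{}(header_hash + std::to_string(salt)) & 1) != 0;
    }

    int main()
    {
        const uint64_t salt = 0x1234;  // per-sync random salt (placeholder value)
        const int period = 4;          // placeholder commitment period
        std::deque<bool> commitments;

        // First (presync) pass: remember one bit every `period` headers.
        for (int height = 1; height <= 12; ++height) {
            if (height % period == 0) {
                commitments.push_back(CommitmentBit(salt, "hash" + std::to_string(height)));
            }
        }

        // Second (redownload) pass: recompute each bit and compare. A mismatch,
        // or running out of stored bits, means the peer served a different chain
        // the second time, and the sync would be aborted.
        for (int height = 1; height <= 12; ++height) {
            if (height % period == 0) {
                if (commitments.empty()) return 1;   // "commitment overrun"
                const bool expected = commitments.front();
                commitments.pop_front();
                const bool recomputed = CommitmentBit(salt, "hash" + std::to_string(height));
                if (recomputed != expected) return 1; // "commitment mismatch"
            }
        }
        return 0; // commitments matched
    }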
@@ -165,7 +165,6 @@ const CLogCategoryDesc LogCategories[] =
 #endif
     {BCLog::UTIL, "util"},
     {BCLog::BLOCKSTORE, "blockstorage"},
-    {BCLog::HEADERSSYNC, "headerssync"},
     {BCLog::ALL, "1"},
     {BCLog::ALL, "all"},
 };
@@ -264,8 +263,6 @@ std::string LogCategoryToStr(BCLog::LogFlags category)
         return "util";
     case BCLog::LogFlags::BLOCKSTORE:
         return "blockstorage";
-    case BCLog::LogFlags::HEADERSSYNC:
-        return "headerssync";
     case BCLog::LogFlags::ALL:
         return "all";
     }
@@ -65,7 +65,6 @@ namespace BCLog {
 #endif
         UTIL = (1 << 25),
         BLOCKSTORE = (1 << 26),
-        HEADERSSYNC = (1 << 27),
         ALL = ~(uint32_t)0,
     };
     enum class Level {
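For context, the categories in the enum above are one-hot bits in a 32-bit mask, which is why dropping HEADERSSYNC = (1 << 27) needs no other bookkeeping: a category is active when its bit is set in the enabled mask. A standalone illustration of that pattern (example names and checks only, not the actual logging implementation):

    #include <cstdint>
    #include <cstdio>

    enum LogFlags : uint32_t {
        NONE = 0,
        NET  = (1 << 0),   // example bit assignment
        UTIL = (1 << 25),
        ALL  = ~uint32_t{0},
    };

    // A category is enabled iff its bit is set in the mask.
    static bool CategoryEnabled(uint32_t enabled_mask, LogFlags category)
    {
        return (enabled_mask & category) != 0;
    }

    int main()
    {
        const uint32_t enabled_mask = NET; // roughly what enabling "net" does
        std::printf("%d %d\n", CategoryEnabled(enabled_mask, NET),
                               CategoryEnabled(enabled_mask, UTIL)); // prints: 1 0
    }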
@@ -2820,6 +2820,13 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
         }
     }
 
+    // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
+    // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
+    // on startup).
+    if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
+        already_validated_work = true;
+    }
+
     // At this point, the headers connect to something in our block index.
     // Do anti-DoS checks to determine if we should process or store for later
     // processing.
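The functional test below exercises this bypass by starting a node with -whitelist=noban@127.0.0.1. For an operator who wants the same behaviour for a trusted peer, a minimal configuration sketch (the address is an example):

    # bitcoin.conf: grant NoBan permission to connections from this address,
    # so its headers skip the anti-DoS work checks above
    whitelist=noban@127.0.0.1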
@@ -3718,7 +3718,7 @@ void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t
     {
         LOCK(cs_main);
         // Don't report headers presync progress if we already have a post-minchainwork header chain.
-        // This means we lose reporting for potentially legimate, but unlikely, deep reorgs, but
+        // This means we lose reporting for potentially legitimate, but unlikely, deep reorgs, but
         // prevent attackers that spam low-work headers from filling our logs.
         if (m_best_header->nChainWork >= UintToArith256(GetConsensus().nMinimumChainWork)) return;
         // Rate limit headers presync updates to 4 per second, as these are not subject to DoS
@@ -28,9 +28,9 @@ NODE2_BLOCKS_REQUIRED = 2047
 class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = True
-        self.num_nodes = 3
+        self.num_nodes = 4
         # Node0 has no required chainwork; node1 requires 15 blocks on top of the genesis block; node2 requires 2047
-        self.extra_args = [["-minimumchainwork=0x0", "-checkblockindex=0"], ["-minimumchainwork=0x1f", "-checkblockindex=0"], ["-minimumchainwork=0x1000", "-checkblockindex=0"]]
+        self.extra_args = [["-minimumchainwork=0x0", "-checkblockindex=0"], ["-minimumchainwork=0x1f", "-checkblockindex=0"], ["-minimumchainwork=0x1000", "-checkblockindex=0"], ["-minimumchainwork=0x1000", "-checkblockindex=0", "-whitelist=noban@127.0.0.1"]]
 
     def setup_network(self):
         self.setup_nodes()
@@ -40,17 +40,34 @@ class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
     def disconnect_all(self):
         self.disconnect_nodes(0, 1)
         self.disconnect_nodes(0, 2)
+        self.disconnect_nodes(0, 3)
 
     def reconnect_all(self):
         self.connect_nodes(0, 1)
         self.connect_nodes(0, 2)
+        self.connect_nodes(0, 3)
 
     def test_chains_sync_when_long_enough(self):
         self.log.info("Generate blocks on the node with no required chainwork, and verify nodes 1 and 2 have no new headers in their headers tree")
         with self.nodes[1].assert_debug_log(expected_msgs=["[net] Ignoring low-work chain (height=14)"]), self.nodes[2].assert_debug_log(expected_msgs=["[net] Ignoring low-work chain (height=14)"]):
             self.generate(self.nodes[0], NODE1_BLOCKS_REQUIRED-1, sync_fun=self.no_op)
 
-        for node in self.nodes[1:]:
+        # Node3 should always allow headers due to noban permissions
+        self.log.info("Check that node3 will sync headers (due to noban permissions)")
+
+        def check_node3_chaintips(num_tips, tip_hash, height):
+            node3_chaintips = self.nodes[3].getchaintips()
+            assert(len(node3_chaintips) == num_tips)
+            assert {
+                'height': height,
+                'hash': tip_hash,
+                'branchlen': height,
+                'status': 'headers-only',
+            } in node3_chaintips
+
+        check_node3_chaintips(2, self.nodes[0].getbestblockhash(), NODE1_BLOCKS_REQUIRED-1)
+
+        for node in self.nodes[1:3]:
             chaintips = node.getchaintips()
             assert(len(chaintips) == 1)
             assert {
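For reference, check_node3_chaintips above matches a single entry of node3's getchaintips RPC result; a trimmed, illustrative example of such an entry (values are placeholders):

    {
      "height": 14,
      "hash": "<header hash of node0's tip>",
      "branchlen": 14,
      "status": "headers-only"
    }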
@@ -63,7 +80,7 @@ class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
         self.log.info("Generate more blocks to satisfy node1's minchainwork requirement, and verify node2 still has no new headers in headers tree")
         with self.nodes[2].assert_debug_log(expected_msgs=["[net] Ignoring low-work chain (height=15)"]):
             self.generate(self.nodes[0], NODE1_BLOCKS_REQUIRED - self.nodes[0].getblockcount(), sync_fun=self.no_op)
-        self.sync_blocks(self.nodes[0:2])
+        self.sync_blocks(self.nodes[0:2]) # node3 will sync headers (noban permissions) but not blocks (due to minchainwork)
 
         assert {
             'height': 0,
@@ -74,10 +91,13 @@ class RejectLowDifficultyHeadersTest(BitcoinTestFramework):
 
         assert(len(self.nodes[2].getchaintips()) == 1)
 
-        self.log.info("Generate long chain for node0/node1")
+        self.log.info("Check that node3 accepted these headers as well")
+        check_node3_chaintips(2, self.nodes[0].getbestblockhash(), NODE1_BLOCKS_REQUIRED)
+
+        self.log.info("Generate long chain for node0/node1/node3")
         self.generate(self.nodes[0], NODE2_BLOCKS_REQUIRED-self.nodes[0].getblockcount(), sync_fun=self.no_op)
 
-        self.log.info("Verify that node2 will sync the chain when it gets long enough")
+        self.log.info("Verify that node2 and node3 will sync the chain when it gets long enough")
         self.sync_blocks()
 
     def test_peerinfo_includes_headers_presync_height(self):