Mirror of https://github.com/bitcoin/bitcoin.git (synced 2025-02-01 09:35:52 -05:00)

Compare commits: 22 commits, 4afce729c4 ... dad51bd862
Commits (SHA1):

- dad51bd862
- 85f96b01b7
- 601a6a6917
- eaf4b928e7
- 992f37f2e1
- e1676b08f7
- 0082f6acc1
- 79d45b10f1
- 0713548137
- 93747d934b
- 0cdddeb224
- 7fbb1bc44b
- 57ba59c0cd
- 9d2d9f7ce2
- 595edee169
- d73ae603d4
- 27f99b6d63
- 42d5d53363
- 5e1ff82251
- 9019c08e4e
- c3d98815cc
- 75668d079c
26 changed files with 374 additions and 117 deletions
doc/release-notes-31600.md (new file, 11 lines)

@@ -0,0 +1,11 @@
Updated RPCs
---
- the `getblocktemplate` RPC `curtime` (BIP22) and `mintime` (BIP23) fields now
  account for the timewarp fix proposed in BIP94 on all networks. This ensures
  that, in the event a timewarp fix softfork activates on mainnet, un-upgraded
  miners will not accidentally violate the timewarp rule. (#31376, #31600)

As a reminder, it's important that any software which uses the `getblocktemplate`
RPC takes these values into account (either `curtime` or `mintime` is fine).
Relying only on a clock can lead to invalid blocks under some circumstances,
especially once a timewarp fix is deployed.
@@ -106,6 +106,7 @@ int main(int argc, char* argv[])
    };
    auto notifications = std::make_unique<KernelNotifications>();

    kernel::CacheSizes cache_sizes{DEFAULT_KERNEL_CACHE};

    // SETUP: Chainstate
    auto chainparams = CChainParams::Main();

@@ -119,11 +120,14 @@ int main(int argc, char* argv[])
        .chainparams = chainman_opts.chainparams,
        .blocks_dir = abs_datadir / "blocks",
        .notifications = chainman_opts.notifications,
        .block_tree_db_params = DBParams{
            .path = abs_datadir / "blocks" / "index",
            .cache_bytes = cache_sizes.block_tree_db,
        },
    };
    util::SignalInterrupt interrupt;
    ChainstateManager chainman{interrupt, chainman_opts, blockman_opts};

    kernel::CacheSizes cache_sizes{DEFAULT_KERNEL_CACHE};
    node::ChainstateLoadOptions options;
    auto [status, error] = node::LoadChainstate(chainman, cache_sizes, options);
    if (status != node::ChainstateLoadStatus::SUCCESS) {
src/init.cpp (20 lines changed)

@@ -1057,6 +1057,10 @@ bool AppInitParameterInteraction(const ArgsManager& args)
        .chainparams = chainman_opts_dummy.chainparams,
        .blocks_dir = args.GetBlocksDirPath(),
        .notifications = chainman_opts_dummy.notifications,
        .block_tree_db_params = DBParams{
            .path = args.GetDataDirNet() / "blocks" / "index",
            .cache_bytes = 0,
        },
    };
    auto blockman_result{ApplyArgsManOptions(args, blockman_opts_dummy)};
    if (!blockman_result) {

@@ -1203,18 +1207,33 @@ static ChainstateLoadResult InitAndLoadChainstate(
        .signals = node.validation_signals.get(),
    };
    Assert(ApplyArgsManOptions(args, chainman_opts)); // no error can happen, already checked in AppInitParameterInteraction

    BlockManager::Options blockman_opts{
        .chainparams = chainman_opts.chainparams,
        .blocks_dir = args.GetBlocksDirPath(),
        .notifications = chainman_opts.notifications,
        .block_tree_db_params = DBParams{
            .path = args.GetDataDirNet() / "blocks" / "index",
            .cache_bytes = cache_sizes.block_tree_db,
            .wipe_data = do_reindex,
        },
    };
    Assert(ApplyArgsManOptions(args, blockman_opts)); // no error can happen, already checked in AppInitParameterInteraction

    // Creating the chainstate manager internally creates a BlockManager, opens
    // the blocks tree db, and wipes existing block files in case of a reindex.
    // The coinsdb is opened at a later point on LoadChainstate.
    try {
        node.chainman = std::make_unique<ChainstateManager>(*Assert(node.shutdown_signal), chainman_opts, blockman_opts);
    } catch (dbwrapper_error& e) {
        LogError("%s", e.what());
        return {ChainstateLoadStatus::FAILURE, _("Error opening block database")};
    } catch (std::exception& e) {
        return {ChainstateLoadStatus::FAILURE_FATAL, Untranslated(strprintf("Failed to initialize ChainstateManager: %s", e.what()))};
    }
    ChainstateManager& chainman = *node.chainman;
    if (chainman.m_interrupt) return {ChainstateLoadStatus::INTERRUPTED, {}};

    // This is defined and set here instead of inline in validation.h to avoid a hard
    // dependency between validation and index/base, since the latter is not in
    // libbitcoinkernel.

@@ -1237,7 +1256,6 @@ static ChainstateLoadResult InitAndLoadChainstate(
    };
    node::ChainstateLoadOptions options;
    options.mempool = Assert(node.mempool.get());
    options.wipe_block_tree_db = do_reindex;
    options.wipe_chainstate_db = do_reindex || do_reindex_chainstate;
    options.prune = chainman.m_blockman.IsPruneMode();
    options.check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
@@ -289,6 +289,9 @@ public:
    //! Check if any block has been pruned.
    virtual bool havePruned() = 0;

    //! Get the current prune height.
    virtual std::optional<int> getPruneHeight() = 0;

    //! Check if the node is ready to broadcast transactions.
    virtual bool isReadyToBroadcast() = 0;
@@ -5,6 +5,7 @@
#ifndef BITCOIN_KERNEL_BLOCKMANAGER_OPTS_H
#define BITCOIN_KERNEL_BLOCKMANAGER_OPTS_H

#include <dbwrapper.h>
#include <kernel/notifications_interface.h>
#include <util/fs.h>

@@ -27,6 +28,7 @@ struct BlockManagerOpts {
    bool fast_prune{false};
    const fs::path blocks_dir;
    Notifications& notifications;
    DBParams block_tree_db_params;
};

} // namespace kernel
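For code that embeds the kernel, the practical effect of the new `block_tree_db_params` member is that the block tree database is configured up front, next to the blocks directory, and opened by `BlockManager` itself. A minimal sketch of filling the options, mirroring the initialization used in init.cpp and the tests later in this compare (the `chainparams`, `datadir`, and `notifications` variables and the cache size are assumed placeholders):

```cpp
// Sketch only: configure the options so BlockManager opens blocks/index itself.
BlockManager::Options blockman_opts{
    .chainparams = chainparams,                // assumed to exist in the caller
    .blocks_dir = datadir / "blocks",
    .notifications = notifications,
    .block_tree_db_params = DBParams{
        .path = datadir / "blocks" / "index",
        .cache_bytes = 2 << 20,                // placeholder cache size
    },
};
```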
@@ -42,7 +42,6 @@ struct ChainstateManagerOpts {
    std::optional<uint256> assumed_valid_block{};
    //! If the tip is older than this, the node is considered to be in initial block download.
    std::chrono::seconds max_tip_age{DEFAULT_MAX_TIP_AGE};
    DBOptions block_tree_db{};
    DBOptions coins_db{};
    CoinsViewOptions coins_view{};
    Notifications& notifications;
@@ -100,6 +100,8 @@ static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
/** Maximum timeout for stalling block download. */
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/** Timeout for stalling when close to the tip, after which we may add additional peers to download from */
static constexpr auto BLOCK_NEARTIP_TIMEOUT_MAX{30s};
/** Maximum depth of blocks we're willing to serve as compact blocks to peers
 *  when requested. For older blocks, a regular BLOCK response will be sent. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;

@@ -746,7 +748,10 @@ private:
    std::atomic<int> m_best_height{-1};
    /** The time of the best chain tip block */
    std::atomic<std::chrono::seconds> m_best_block_time{0s};

    /** The last time we requested a block from any peer */
    std::atomic<std::chrono::seconds> m_last_block_requested{0s};
    /** The last time we received a block from any peer */
    std::atomic<std::chrono::seconds> m_last_block_received{0s};
    /** Next time to check for stale tip */
    std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};

@@ -1213,6 +1218,7 @@ bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, st
    if (pit) {
        *pit = &itInFlight->second.second;
    }
    m_last_block_requested = GetTime<std::chrono::seconds>();
    return true;
}

@@ -1461,6 +1467,30 @@ void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, c
                if (waitingfor == -1) {
                    // This is the first already-in-flight block.
                    waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;

                    // Decide whether to request this block from additional peers in parallel.
                    // This is done if we are close (<=1024 blocks) from the tip, so that the usual
                    // stalling mechanism doesn't work. To reduce excessive waste of bandwith, do this only
                    // 30 seconds (BLOCK_NEARTIP_TIMEOUT_MAX) after a block was requested or received from any peer,
                    // and only with up to 3 peers in parallel.
                    bool already_requested_from_peer{false};
                    auto range{mapBlocksInFlight.equal_range(pindex->GetBlockHash())};
                    while (range.first != range.second) {
                        if (range.first->second.first == peer.m_id) {
                            already_requested_from_peer = true;
                            break;
                        }
                        range.first++;
                    }
                    if (nMaxHeight <= nWindowEnd && // we have 1024 or less blocks left to download
                        m_last_block_requested.load() > 0s &&
                        GetTime<std::chrono::microseconds>() > m_last_block_requested.load() + BLOCK_NEARTIP_TIMEOUT_MAX &&
                        GetTime<std::chrono::microseconds>() > m_last_block_received.load() + BLOCK_NEARTIP_TIMEOUT_MAX &&
                        !already_requested_from_peer &&
                        mapBlocksInFlight.count(pindex->GetBlockHash()) <= 2) {
                        LogDebug(BCLog::NET, "Possible stalling close to tip: Requesting block %s additionally from peer %d\n", pindex->GetBlockHash().ToString(), peer.m_id);
                        vBlocks.push_back(pindex);
                    }
                }
                continue;
            }

@@ -3269,6 +3299,7 @@ void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlo
    m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
    if (new_block) {
        node.m_last_block_time = GetTime<std::chrono::seconds>();
        m_last_block_received = GetTime<std::chrono::seconds>();
        // In case this block came from a different peer than we requested
        // from, we can erase the block request now anyway (as we just stored
        // this block to disk).
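The comment block in FindNextBlocks above compresses into a small predicate. The following self-contained sketch restates those conditions; it is an assumption-laden simplification, with plain integer seconds in place of the chrono/atomic types and the last two parameters standing in for the mapBlocksInFlight lookups, and the function name is illustrative:

```cpp
#include <cstdint>

// Sketch of the near-tip parallel-download decision described above.
// Returns true if the block may additionally be requested from this peer.
bool MayRequestNearTipDuplicate(int64_t now,
                                int64_t last_block_requested,
                                int64_t last_block_received,
                                bool window_covers_remaining_blocks,  // <= 1024 blocks left to download
                                bool already_requested_from_peer,
                                int peers_already_downloading_it)
{
    constexpr int64_t BLOCK_NEARTIP_TIMEOUT_MAX{30}; // seconds, mirrors the new constant
    return window_covers_remaining_blocks &&
           last_block_requested > 0 &&                                   // at least one block requested before
           now > last_block_requested + BLOCK_NEARTIP_TIMEOUT_MAX &&     // no request progress for 30s
           now > last_block_received + BLOCK_NEARTIP_TIMEOUT_MAX &&      // no block received for 30s
           !already_requested_from_peer &&
           peers_already_downloading_it <= 2;                            // at most 3 peers in parallel
}
```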
@@ -6,6 +6,7 @@

#include <common/args.h>
#include <node/blockstorage.h>
#include <node/database_args.h>
#include <tinyformat.h>
#include <util/result.h>
#include <util/translation.h>

@@ -34,6 +35,8 @@ util::Result<void> ApplyArgsManOptions(const ArgsManager& args, BlockManager::Op

    if (auto value{args.GetBoolArg("-fastprune")}) opts.fast_prune = *value;

    ReadDatabaseArgs(args, opts.block_tree_db_params.options);

    return {};
}
} // namespace node
@@ -36,6 +36,7 @@
#include <util/translation.h>
#include <validation.h>

#include <cstddef>
#include <map>
#include <ranges>
#include <unordered_map>

@@ -1169,7 +1170,19 @@ BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
      m_opts{std::move(opts)},
      m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
      m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
      m_interrupt{interrupt} {}
      m_interrupt{interrupt}
{
    m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params);

    if (m_opts.block_tree_db_params.wipe_data) {
        m_block_tree_db->WriteReindexing(true);
        m_blockfiles_indexed = false;
        // If we're reindexing in prune mode, wipe away unusable block files and all undo data files
        if (m_prune_mode) {
            CleanupBlockRevFiles();
        }
    }
}

class ImportingNow
{
@@ -23,10 +23,7 @@
#include <validation.h>

#include <algorithm>
#include <atomic>
#include <cassert>
#include <limits>
#include <memory>
#include <vector>

using kernel::CacheSizes;

@@ -36,34 +33,8 @@ namespace node {
// to ChainstateManager::InitializeChainstate().
static ChainstateLoadResult CompleteChainstateInitialization(
    ChainstateManager& chainman,
    const CacheSizes& cache_sizes,
    const ChainstateLoadOptions& options) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
    auto& pblocktree{chainman.m_blockman.m_block_tree_db};
    // new BlockTreeDB tries to delete the existing file, which
    // fails if it's still open from the previous loop. Close it first:
    pblocktree.reset();
    try {
        pblocktree = std::make_unique<BlockTreeDB>(DBParams{
            .path = chainman.m_options.datadir / "blocks" / "index",
            .cache_bytes = cache_sizes.block_tree_db,
            .memory_only = options.block_tree_db_in_memory,
            .wipe_data = options.wipe_block_tree_db,
            .options = chainman.m_options.block_tree_db});
    } catch (dbwrapper_error& err) {
        LogError("%s\n", err.what());
        return {ChainstateLoadStatus::FAILURE, _("Error opening block database")};
    }

    if (options.wipe_block_tree_db) {
        pblocktree->WriteReindexing(true);
        chainman.m_blockman.m_blockfiles_indexed = false;
        //If we're reindexing in prune mode, wipe away unusable block files and all undo data files
        if (options.prune) {
            chainman.m_blockman.CleanupBlockRevFiles();
        }
    }

    if (chainman.m_interrupt) return {ChainstateLoadStatus::INTERRUPTED, {}};

    // LoadBlockIndex will load m_have_pruned if we've ever removed a

@@ -155,14 +126,12 @@ static ChainstateLoadResult CompleteChainstateInitialization(
        }
    }

    if (!options.wipe_block_tree_db) {
        auto chainstates{chainman.GetAll()};
        if (std::any_of(chainstates.begin(), chainstates.end(),
                        [](const Chainstate* cs) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return cs->NeedsRedownload(); })) {
            return {ChainstateLoadStatus::FAILURE, strprintf(_("Witness data for blocks after height %d requires validation. Please restart with -reindex."),
                                                             chainman.GetConsensus().SegwitHeight)};
        };
    }
    auto chainstates{chainman.GetAll()};
    if (std::any_of(chainstates.begin(), chainstates.end(),
                    [](const Chainstate* cs) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return cs->NeedsRedownload(); })) {
        return {ChainstateLoadStatus::FAILURE, strprintf(_("Witness data for blocks after height %d requires validation. Please restart with -reindex."),
                                                         chainman.GetConsensus().SegwitHeight)};
    };

    // Now that chainstates are loaded and we're able to flush to
    // disk, rebalance the coins caches to desired levels based

@@ -208,7 +177,7 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
        }
    }

    auto [init_status, init_error] = CompleteChainstateInitialization(chainman, cache_sizes, options);
    auto [init_status, init_error] = CompleteChainstateInitialization(chainman, options);
    if (init_status != ChainstateLoadStatus::SUCCESS) {
        return {init_status, init_error};
    }

@@ -244,7 +213,7 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
    // for the fully validated chainstate.
    chainman.ActiveChainstate().ClearBlockIndexCandidates();

    auto [init_status, init_error] = CompleteChainstateInitialization(chainman, cache_sizes, options);
    auto [init_status, init_error] = CompleteChainstateInitialization(chainman, options);
    if (init_status != ChainstateLoadStatus::SUCCESS) {
        return {init_status, init_error};
    }
@@ -22,12 +22,7 @@ namespace node {

struct ChainstateLoadOptions {
    CTxMemPool* mempool{nullptr};
    bool block_tree_db_in_memory{false};
    bool coins_db_in_memory{false};
    // Whether to wipe the block tree database when loading it. If set, this
    // will also set a reindexing flag so any existing block data files will be
    // scanned and added to the database.
    bool wipe_block_tree_db{false};
    // Whether to wipe the chainstate database when loading it. If set, this
    // will cause the chainstate database to be rebuilt starting from genesis.
    bool wipe_chainstate_db{false};
@@ -49,7 +49,6 @@ util::Result<void> ApplyArgsManOptions(const ArgsManager& args, ChainstateManage

    if (auto value{args.GetIntArg("-maxtipage")}) opts.max_tip_age = std::chrono::seconds{*value};

    ReadDatabaseArgs(args, opts.block_tree_db);
    ReadDatabaseArgs(args, opts.coins_db);
    ReadCoinsViewArgs(args, opts.coins_view);
@@ -46,6 +46,7 @@
#include <policy/settings.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <rpc/blockchain.h>
#include <rpc/protocol.h>
#include <rpc/server.h>
#include <support/allocators/secure.h>

@@ -770,6 +771,11 @@ public:
        LOCK(::cs_main);
        return chainman().m_blockman.m_have_pruned;
    }
    std::optional<int> getPruneHeight() override
    {
        LOCK(chainman().GetMutex());
        return GetPruneHeight(chainman().m_blockman, chainman().ActiveChain());
    }
    bool isReadyToBroadcast() override { return !chainman().m_blockman.LoadingBlocks() && !isInitialBlockDownload(); }
    bool isInitialBlockDownload() override
    {
@@ -28,16 +28,25 @@
#include <utility>

namespace node {

int64_t GetMinimumTime(const CBlockIndex* pindexPrev, const int64_t difficulty_adjustment_interval)
{
    int64_t min_time{pindexPrev->GetMedianTimePast() + 1};
    // Height of block to be mined.
    const int height{pindexPrev->nHeight + 1};
    // Account for BIP94 timewarp rule on all networks. This makes future
    // activation safer.
    if (height % difficulty_adjustment_interval == 0) {
        min_time = std::max<int64_t>(min_time, pindexPrev->GetBlockTime() - MAX_TIMEWARP);
    }
    return min_time;
}

int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    int64_t nOldTime = pblock->nTime;
    int64_t nNewTime{std::max<int64_t>(pindexPrev->GetMedianTimePast() + 1, TicksSinceEpoch<std::chrono::seconds>(NodeClock::now()))};

    // Height of block to be mined.
    const int height{pindexPrev->nHeight + 1};
    if (height % consensusParams.DifficultyAdjustmentInterval() == 0) {
        nNewTime = std::max<int64_t>(nNewTime, pindexPrev->GetBlockTime() - MAX_TIMEWARP);
    }
    int64_t nNewTime{std::max<int64_t>(GetMinimumTime(pindexPrev, consensusParams.DifficultyAdjustmentInterval()),
                                       TicksSinceEpoch<std::chrono::seconds>(NodeClock::now()))};

    if (nOldTime < nNewTime) {
        pblock->nTime = nNewTime;

@@ -211,6 +211,13 @@ private:
    void SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries);
};

/**
 * Get the minimum time a miner should use in the next block. This always
 * accounts for the BIP94 timewarp rule, so does not necessarily reflect the
 * consensus limit.
 */
int64_t GetMinimumTime(const CBlockIndex* pindexPrev, const int64_t difficulty_adjustment_interval);

int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev);

/** Update an old GenerateCoinbaseCommitment from CreateNewBlock after the block txs have changed */
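A self-contained sketch of what `GetMinimumTime` does, with stand-ins for the pieces that come from the node (the real function reads these values from the `CBlockIndex`, and `max_timewarp` here is a parameter rather than the `MAX_TIMEWARP` consensus constant; the function name is illustrative):

```cpp
#include <algorithm>
#include <cstdint>

// Simplified model of the GetMinimumTime logic added above.
// prev_mtp:        median-time-past of the previous block
// prev_block_time: nTime of the previous block
// prev_height:     height of the previous block
int64_t MinimumBlockTime(int64_t prev_mtp, int64_t prev_block_time, int prev_height,
                         int difficulty_adjustment_interval, int64_t max_timewarp)
{
    int64_t min_time{prev_mtp + 1};                     // must be after median-time-past
    const int height{prev_height + 1};                  // height of the block being mined
    if (height % difficulty_adjustment_interval == 0) { // first block of a retarget period
        // Timewarp rule: don't go further back than max_timewarp below the
        // previous block's timestamp, even if median-time-past would allow it.
        min_time = std::max(min_time, prev_block_time - max_timewarp);
    }
    return min_time;
}
```

For example, when the previous retarget period ends with a block timestamped far ahead of median-time-past, the first block of the new period may not be timestamped more than `max_timewarp` seconds earlier than that block, even though median-time-past alone would allow it.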
@@ -49,6 +49,7 @@
using interfaces::BlockTemplate;
using interfaces::Mining;
using node::BlockAssembler;
using node::GetMinimumTime;
using node::NodeContext;
using node::RegenerateCommitments;
using node::UpdateTime;

@@ -674,7 +675,7 @@ static RPCHelpMan getblocktemplate()
    {RPCResult::Type::NUM, "coinbasevalue", "maximum allowable input to coinbase transaction, including the generation award and transaction fees (in satoshis)"},
    {RPCResult::Type::STR, "longpollid", "an id to include with a request to longpoll on an update to this template"},
    {RPCResult::Type::STR, "target", "The hash target"},
    {RPCResult::Type::NUM_TIME, "mintime", "The minimum timestamp appropriate for the next block time, expressed in " + UNIX_EPOCH_TIME},
    {RPCResult::Type::NUM_TIME, "mintime", "The minimum timestamp appropriate for the next block time, expressed in " + UNIX_EPOCH_TIME + ". Adjusted for the proposed BIP94 timewarp rule."},
    {RPCResult::Type::ARR, "mutable", "list of ways the block template may be changed",
    {
        {RPCResult::Type::STR, "value", "A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'"},

@@ -683,7 +684,7 @@ static RPCHelpMan getblocktemplate()
    {RPCResult::Type::NUM, "sigoplimit", "limit of sigops in blocks"},
    {RPCResult::Type::NUM, "sizelimit", "limit of block size"},
    {RPCResult::Type::NUM, "weightlimit", /*optional=*/true, "limit of block weight"},
    {RPCResult::Type::NUM_TIME, "curtime", "current timestamp in " + UNIX_EPOCH_TIME},
    {RPCResult::Type::NUM_TIME, "curtime", "current timestamp in " + UNIX_EPOCH_TIME + ". Adjusted for the proposed BIP94 timewarp rule."},
    {RPCResult::Type::STR, "bits", "compressed target of next block"},
    {RPCResult::Type::NUM, "height", "The height of the next block"},
    {RPCResult::Type::STR_HEX, "signet_challenge", /*optional=*/true, "Only on signet"},

@@ -977,7 +978,7 @@ static RPCHelpMan getblocktemplate()
    result.pushKV("coinbasevalue", (int64_t)block.vtx[0]->vout[0].nValue);
    result.pushKV("longpollid", tip.GetHex() + ToString(nTransactionsUpdatedLast));
    result.pushKV("target", hashTarget.GetHex());
    result.pushKV("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1);
    result.pushKV("mintime", GetMinimumTime(pindexPrev, consensusParams.DifficultyAdjustmentInterval()));
    result.pushKV("mutable", std::move(aMutable));
    result.pushKV("noncerange", "00000000ffffffff");
    int64_t nSigOpLimit = MAX_BLOCK_SIGOPS_COST;
@@ -33,6 +33,10 @@ BOOST_AUTO_TEST_CASE(blockmanager_find_block_pos)
        .chainparams = *params,
        .blocks_dir = m_args.GetBlocksDirPath(),
        .notifications = notifications,
        .block_tree_db_params = DBParams{
            .path = m_args.GetDataDirNet() / "blocks" / "index",
            .cache_bytes = 0,
        },
    };
    BlockManager blockman{*Assert(m_node.shutdown_signal), blockman_opts};
    // simulate adding a genesis block normally

@@ -140,6 +144,10 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file)
        .chainparams = Params(),
        .blocks_dir = m_args.GetBlocksDirPath(),
        .notifications = notifications,
        .block_tree_db_params = DBParams{
            .path = m_args.GetDataDirNet() / "blocks" / "index",
            .cache_bytes = 0,
        },
    };
    BlockManager blockman{*Assert(m_node.shutdown_signal), blockman_opts};
@@ -62,7 +62,6 @@
#include <stdexcept>

using namespace util::hex_literals;
using kernel::BlockTreeDB;
using node::ApplyArgsManOptions;
using node::BlockAssembler;
using node::BlockManager;

@@ -252,14 +251,14 @@ ChainTestingSetup::ChainTestingSetup(const ChainType chainType, TestOpts opts)
            .chainparams = chainman_opts.chainparams,
            .blocks_dir = m_args.GetBlocksDirPath(),
            .notifications = chainman_opts.notifications,
            .block_tree_db_params = DBParams{
                .path = m_args.GetDataDirNet() / "blocks" / "index",
                .cache_bytes = m_kernel_cache_sizes.block_tree_db,
                .memory_only = opts.block_tree_db_in_memory,
                .wipe_data = m_args.GetBoolArg("-reindex", false),
            },
        };
        m_node.chainman = std::make_unique<ChainstateManager>(*Assert(m_node.shutdown_signal), chainman_opts, blockman_opts);
        LOCK(m_node.chainman->GetMutex());
        m_node.chainman->m_blockman.m_block_tree_db = std::make_unique<BlockTreeDB>(DBParams{
            .path = m_args.GetDataDirNet() / "blocks" / "index",
            .cache_bytes = m_kernel_cache_sizes.block_tree_db,
            .memory_only = true,
        });
    };
    m_make_chainman();
}

@@ -285,9 +284,7 @@ void ChainTestingSetup::LoadVerifyActivateChainstate()
    auto& chainman{*Assert(m_node.chainman)};
    node::ChainstateLoadOptions options;
    options.mempool = Assert(m_node.mempool.get());
    options.block_tree_db_in_memory = m_block_tree_db_in_memory;
    options.coins_db_in_memory = m_coins_db_in_memory;
    options.wipe_block_tree_db = m_args.GetBoolArg("-reindex", false);
    options.wipe_chainstate_db = m_args.GetBoolArg("-reindex", false) || m_args.GetBoolArg("-reindex-chainstate", false);
    options.prune = chainman.m_blockman.IsPruneMode();
    options.check_blocks = m_args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
@@ -393,6 +393,11 @@ struct SnapshotTestSetup : TestChain100Setup {
        .chainparams = chainman_opts.chainparams,
        .blocks_dir = m_args.GetBlocksDirPath(),
        .notifications = chainman_opts.notifications,
        .block_tree_db_params = DBParams{
            .path = chainman.m_options.datadir / "blocks" / "index",
            .cache_bytes = m_kernel_cache_sizes.block_tree_db,
            .memory_only = m_block_tree_db_in_memory,
        },
    };
    // For robustness, ensure the old manager is destroyed before creating a
    // new one.
@@ -5623,9 +5623,8 @@ double ChainstateManager::GuessVerificationProgress(const CBlockIndex* pindex) c
        return 0.0;
    }

    if (!Assume(pindex->m_chain_tx_count > 0)) {
        LogWarning("Internal bug detected: block %d has unset m_chain_tx_count (%s %s). Please report this issue here: %s\n",
                   pindex->nHeight, CLIENT_NAME, FormatFullVersion(), CLIENT_BUGREPORT);
    if (pindex->m_chain_tx_count == 0) {
        LogDebug(BCLog::VALIDATION, "Block %d has unset m_chain_tx_count. Unable to estimate verification progress.\n", pindex->nHeight);
        return 0.0;
    }
@@ -1745,20 +1745,27 @@ RPCHelpMan importdescriptors()
            if (scanned_time <= GetImportTimestamp(request, now) || results.at(i).exists("error")) {
                response.push_back(results.at(i));
            } else {
                std::string error_msg{strprintf("Rescan failed for descriptor with timestamp %d. There "
                    "was an error reading a block from time %d, which is after or within %d seconds "
                    "of key creation, and could contain transactions pertaining to the desc. As a "
                    "result, transactions and coins using this desc may not appear in the wallet.",
                    GetImportTimestamp(request, now), scanned_time - TIMESTAMP_WINDOW - 1, TIMESTAMP_WINDOW)};
                if (pwallet->chain().havePruned()) {
                    error_msg += strprintf(" This error could be caused by pruning or data corruption "
                        "(see bitcoind log for details) and could be dealt with by downloading and "
                        "rescanning the relevant blocks (see -reindex option and rescanblockchain RPC).");
                } else if (pwallet->chain().hasAssumedValidChain()) {
                    error_msg += strprintf(" This error is likely caused by an in-progress assumeutxo "
                        "background sync. Check logs or getchainstates RPC for assumeutxo background "
                        "sync progress and try again later.");
                } else {
                    error_msg += strprintf(" This error could potentially caused by data corruption. If "
                        "the issue persists you may want to reindex (see -reindex option).");
                }

                UniValue result = UniValue(UniValue::VOBJ);
                result.pushKV("success", UniValue(false));
                result.pushKV(
                    "error",
                    JSONRPCError(
                        RPC_MISC_ERROR,
                        strprintf("Rescan failed for descriptor with timestamp %d. There was an error reading a "
                                  "block from time %d, which is after or within %d seconds of key creation, and "
                                  "could contain transactions pertaining to the desc. As a result, transactions "
                                  "and coins using this desc may not appear in the wallet. This error could be "
                                  "caused by pruning or data corruption (see bitcoind log for details) and could "
                                  "be dealt with by downloading and rescanning the relevant blocks (see -reindex "
                                  "option and rescanblockchain RPC).",
                                  GetImportTimestamp(request, now), scanned_time - TIMESTAMP_WINDOW - 1, TIMESTAMP_WINDOW)));
                result.pushKV("error", JSONRPCError(RPC_MISC_ERROR, error_msg));
                response.push_back(std::move(result));
            }
        }
@@ -6,6 +6,7 @@
#include <key_io.h>
#include <policy/rbf.h>
#include <rpc/util.h>
#include <rpc/blockchain.h>
#include <util/vector.h>
#include <wallet/receive.h>
#include <wallet/rpc/util.h>

@@ -909,9 +910,15 @@ RPCHelpMan rescanblockchain()
        }
    }

    // We can't rescan beyond non-pruned blocks, stop and throw an error
    // We can't rescan unavailable blocks, stop and throw an error
    if (!pwallet->chain().hasBlocks(pwallet->GetLastBlockHash(), start_height, stop_height)) {
        throw JSONRPCError(RPC_MISC_ERROR, "Can't rescan beyond pruned data. Use RPC call getblockchaininfo to determine your pruned height.");
        if (pwallet->chain().havePruned() && pwallet->chain().getPruneHeight() >= start_height) {
            throw JSONRPCError(RPC_MISC_ERROR, "Can't rescan beyond pruned data. Use RPC call getblockchaininfo to determine your pruned height.");
        }
        if (pwallet->chain().hasAssumedValidChain()) {
            throw JSONRPCError(RPC_MISC_ERROR, "Failed to rescan unavailable blocks likely due to an in-progress assumeutxo background sync. Check logs or getchainstates RPC for assumeutxo background sync progress and try again later.");
        }
        throw JSONRPCError(RPC_MISC_ERROR, "Failed to rescan unavailable blocks, potentially caused by data corruption. If the issue persists you may want to reindex (see -reindex option).");
    }

    CHECK_NONFATAL(pwallet->chain().findAncestorByHeight(pwallet->GetLastBlockHash(), start_height, FoundBlock().hash(start_block)));
@@ -153,6 +153,8 @@ class MiningTest(BitcoinTestFramework):
        # The template will have an adjusted timestamp, which we then modify
        tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
        assert_greater_than_or_equal(tmpl['curtime'], t + MAX_FUTURE_BLOCK_TIME - MAX_TIMEWARP)
        # mintime and curtime should match
        assert_equal(tmpl['mintime'], tmpl['curtime'])

        block = CBlock()
        block.nVersion = tmpl["version"]
@@ -13,14 +13,26 @@ from test_framework.blocktools import (
    create_coinbase
)
from test_framework.messages import (
    COutPoint,
    CTransaction,
    CTxIn,
    CTxOut,
    HeaderAndShortIDs,
    MSG_BLOCK,
    MSG_TYPE_MASK,
    msg_cmpctblock,
    msg_sendcmpct,
)
from test_framework.script import (
    CScript,
    OP_TRUE,
)
from test_framework.p2p import (
    CBlockHeader,
    msg_block,
    msg_headers,
    P2PDataStore,
    p2p_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (

@@ -31,6 +43,7 @@ from test_framework.util import (
class P2PStaller(P2PDataStore):
    def __init__(self, stall_block):
        self.stall_block = stall_block
        self.stall_block_requested = False
        super().__init__()

    def on_getdata(self, message):

@@ -39,6 +52,8 @@ class P2PStaller(P2PDataStore):
            if (inv.type & MSG_TYPE_MASK) == MSG_BLOCK:
                if (inv.hash != self.stall_block):
                    self.send_message(msg_block(self.block_store[inv.hash]))
                else:
                    self.stall_block_requested = True

    def on_getheaders(self, message):
        pass

@@ -47,44 +62,50 @@ class P2PStaller(P2PDataStore):
class P2PIBDStallingTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.num_nodes = 3

    def setup_network(self):
        self.setup_nodes()
        # Don't connect the nodes

    def prepare_blocks(self):
        self.log.info("Prepare blocks without sending them to any node")
        self.NUM_BLOCKS = 1025
        self.block_dict = {}
        self.blocks = []

    def run_test(self):
        NUM_BLOCKS = 1025
        NUM_PEERS = 4
        node = self.nodes[0]
        tip = int(node.getbestblockhash(), 16)
        blocks = []
        height = 1
        block_time = node.getblock(node.getbestblockhash())['time'] + 1
        self.log.info("Prepare blocks without sending them to the node")
        block_dict = {}
        for _ in range(NUM_BLOCKS):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
        block_time = int(time.time())
        for _ in range(self.NUM_BLOCKS):
            self.blocks.append(create_block(tip, create_coinbase(height), block_time))
            self.blocks[-1].solve()
            tip = self.blocks[-1].sha256
            block_time += 1
            height += 1
            block_dict[blocks[-1].sha256] = blocks[-1]
        stall_block = blocks[0].sha256
            self.block_dict[self.blocks[-1].sha256] = self.blocks[-1]

    def ibd_stalling(self):
        NUM_PEERS = 4
        stall_block = self.blocks[0].sha256
        node = self.nodes[0]

        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in blocks[:NUM_BLOCKS-1]]
        headers_message.headers = [CBlockHeader(b) for b in self.blocks[:self.NUM_BLOCKS-1]]
        peers = []

        self.log.info("Part 1: Test stalling during IBD")
        self.log.info("Check that a staller does not get disconnected if the 1024 block lookahead buffer is filled")
        self.mocktime = int(time.time()) + 1
        node.setmocktime(self.mocktime)
        for id in range(NUM_PEERS):
            peers.append(node.add_outbound_p2p_connection(P2PStaller(stall_block), p2p_idx=id, connection_type="outbound-full-relay"))
            peers[-1].block_store = block_dict
            peers[-1].block_store = self.block_dict
            peers[-1].send_message(headers_message)

        # Need to wait until 1023 blocks are received - the magic total bytes number is a workaround in lack of an rpc
        # returning the number of downloaded (but not connected) blocks.
        bytes_recv = 172761 if not self.options.v2transport else 169692
        self.wait_until(lambda: self.total_bytes_recv_for_blocks() == bytes_recv)

        # Wait until all blocks are received (except for stall_block), so that no other blocks are in flight.
        self.wait_until(lambda: sum(len(peer['inflight']) for peer in node.getpeerinfo()) == 1)
        self.all_sync_send_with_ping(peers)
        # If there was a peer marked for stalling, it would get disconnected
        self.mocktime += 3

@@ -93,7 +114,7 @@ class P2PIBDStallingTest(BitcoinTestFramework):
        assert_equal(node.num_test_p2p_connections(), NUM_PEERS)

        self.log.info("Check that increasing the window beyond 1024 blocks triggers stalling logic")
        headers_message.headers = [CBlockHeader(b) for b in blocks]
        headers_message.headers = [CBlockHeader(b) for b in self.blocks]
        with node.assert_debug_log(expected_msgs=['Stall started']):
            for p in peers:
                p.send_message(headers_message)

@@ -139,17 +160,123 @@ class P2PIBDStallingTest(BitcoinTestFramework):
        with node.assert_debug_log(expected_msgs=['Decreased stalling timeout to 2 seconds']):
            for p in peers:
                if p.is_connected and (stall_block in p.getdata_requests):
                    p.send_message(msg_block(block_dict[stall_block]))
                    p.send_message(msg_block(self.block_dict[stall_block]))

        self.log.info("Check that all outstanding blocks get connected")
        self.wait_until(lambda: node.getblockcount() == NUM_BLOCKS)
        self.wait_until(lambda: node.getblockcount() == self.NUM_BLOCKS)

    def total_bytes_recv_for_blocks(self):
        total = 0
        for info in self.nodes[0].getpeerinfo():
            if ("block" in info["bytesrecv_per_msg"].keys()):
                total += info["bytesrecv_per_msg"]["block"]
        return total
    def near_tip_stalling(self):
        node = self.nodes[1]
        self.log.info("Part 3: Test stalling close to the tip")
        # only send <= 1024 headers, so that the window can't overshoot and the ibd stalling mechanism isn't triggered
        # make sure it works at different lengths
        for header_length in [1, 10, 1024]:
            peers = []
            stall_block = self.blocks[0].sha256
            headers_message = msg_headers()
            headers_message.headers = [CBlockHeader(b) for b in self.blocks[:self.NUM_BLOCKS-1][:header_length]]

            self.mocktime = int(time.time())
            node.setmocktime(self.mocktime)

            self.log.info(f"Add three stalling peers, sending {header_length} headers")
            for id in range(4):
                peers.append(node.add_outbound_p2p_connection(P2PStaller(stall_block), p2p_idx=id, connection_type="outbound-full-relay"))
                peers[-1].block_store = self.block_dict
                peers[-1].send_message(headers_message)

            self.wait_until(lambda: sum(len(peer['inflight']) for peer in node.getpeerinfo()) == 1)
            self.all_sync_send_with_ping(peers)
            assert_equal(sum(peer.stall_block_requested for peer in peers), 1)

            self.log.info("Check that after 30 seconds we request the block from a second peer")
            self.mocktime += 31
            node.setmocktime(self.mocktime)
            self.wait_until(lambda: sum(peer.stall_block_requested for peer in peers) == 2)

            self.log.info("Check that after another 30 seconds we request the block from a third peer")
            self.mocktime += 31
            node.setmocktime(self.mocktime)
            self.wait_until(lambda: sum(peer.stall_block_requested for peer in peers) == 3)

            self.log.info("Check that after another 30 seconds we aren't requesting it from a fourth peer yet")
            self.mocktime += 31
            node.setmocktime(self.mocktime)
            self.all_sync_send_with_ping(peers)
            self.wait_until(lambda: sum(peer.stall_block_requested for peer in peers) == 3)

            self.log.info("Check that after another 20 minutes, first three stalling peers are disconnected")
            # 10 minutes BLOCK_DOWNLOAD_TIMEOUT_BASE + 2*5 minutes BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
            self.mocktime += 20 * 60
            node.setmocktime(self.mocktime)
            # all peers have been requested
            self.wait_until(lambda: sum(peer.stall_block_requested for peer in peers) == 4)

            self.log.info("Check that after another 20 minutes, last stalling peer is disconnected")
            # 10 minutes BLOCK_DOWNLOAD_TIMEOUT_BASE + 2*5 minutes BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
            self.mocktime += 20 * 60
            node.setmocktime(self.mocktime)
            for peer in peers:
                peer.wait_for_disconnect()

            self.log.info("Provide missing block and check that the sync succeeds")
            peer = node.add_outbound_p2p_connection(P2PStaller(stall_block), p2p_idx=0, connection_type="outbound-full-relay")
            peer.send_message(msg_block(self.block_dict[stall_block]))
            self.wait_until(lambda: node.getblockcount() == self.NUM_BLOCKS - 1)
            node.disconnect_p2ps()

    def at_tip_stalling(self):
        self.log.info("Test stalling and interaction with compact blocks when at tip")
        node = self.nodes[2]
        peers = []
        # Create a block with a tx (would be invalid, but this doesn't matter since we will only ever send the header)
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.blocks[1].vtx[0].sha256, 0), scriptSig=b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
        tx.calc_sha256()
        block_time = self.blocks[1].nTime + 1
        block = create_block(self.blocks[1].sha256, create_coinbase(3), block_time, txlist=[tx])
        block.solve()

        for id in range(3):
            peers.append(node.add_outbound_p2p_connection(P2PStaller(block.sha256), p2p_idx=id, connection_type="outbound-full-relay"))

        # First Peer is a high-bw compact block peer
        peers[0].send_and_ping(msg_sendcmpct(announce=True, version=2))
        peers[0].block_store = self.block_dict
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in self.blocks[:2]]
        peers[0].send_message(headers_message)
        self.wait_until(lambda: node.getblockcount() == 2)

        self.log.info("First peer announces via cmpctblock")
        cmpct_block = HeaderAndShortIDs()
        cmpct_block.initialize_from_block(block)
        peers[0].send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
        with p2p_lock:
            assert "getblocktxn" in peers[0].last_message

        self.log.info("Also announce block from other peers by header")
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(block)]
        for peer in peers[1:4]:
            peer.send_and_ping(headers_message)

        self.log.info("Check that block is requested from two more header-announcing peers")
        self.wait_until(lambda: sum(peer.stall_block_requested for peer in peers) == 0)

        self.mocktime = int(time.time()) + 31
        node.setmocktime(self.mocktime)
        self.wait_until(lambda: sum(peer.stall_block_requested for peer in peers) == 1)

        self.mocktime += 31
        node.setmocktime(self.mocktime)
        self.wait_until(lambda: sum(peer.stall_block_requested for peer in peers) == 2)

        self.log.info("Check that block is not requested from a third header-announcing peer")
        self.mocktime += 31
        node.setmocktime(self.mocktime)
        self.wait_until(lambda: sum(peer.stall_block_requested for peer in peers) == 2)

    def all_sync_send_with_ping(self, peers):
        for p in peers:

@@ -162,6 +289,12 @@ class P2PIBDStallingTest(BitcoinTestFramework):
            return True
        return False

    def run_test(self):
        self.prepare_blocks()
        self.ibd_stalling()
        self.near_tip_stalling()
        self.at_tip_stalling()


if __name__ == '__main__':
    P2PIBDStallingTest(__file__).main()
@@ -549,6 +549,7 @@ class BlockchainTest(BitcoinTestFramework):
        # The chain has probably already been restored by the time reconsiderblock returns,
        # but poll anyway.
        self.wait_until(lambda: node.waitfornewblock(timeout=100)['hash'] == current_hash)
        assert_raises_rpc_error(-1, "Negative timeout", node.waitfornewblock, -1)

    def _test_waitforblockheight(self):
        self.log.info("Test waitforblockheight")
@@ -7,11 +7,11 @@ See feature_assumeutxo.py for background.

## Possible test improvements

- TODO: test import descriptors while background sync is in progress
- TODO: test loading a wallet (backup) on a pruned node

"""
from test_framework.address import address_to_scriptpubkey
from test_framework.descriptors import descsum_create
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import COIN
from test_framework.util import (

@@ -20,6 +20,7 @@ from test_framework.util import (
    ensure_for,
)
from test_framework.wallet import MiniWallet
from test_framework.wallet_util import get_generate_key

START_HEIGHT = 199
SNAPSHOT_BASE_HEIGHT = 299

@@ -49,6 +50,13 @@ class AssumeutxoTest(BitcoinTestFramework):
        self.add_nodes(3)
        self.start_nodes(extra_args=self.extra_args)

    def import_descriptor(self, node, wallet_name, key, timestamp):
        import_request = [{"desc": descsum_create("pkh(" + key.pubkey + ")"),
                           "timestamp": timestamp,
                           "label": "Descriptor import test"}]
        wrpc = node.get_wallet_rpc(wallet_name)
        return wrpc.importdescriptors(import_request)

    def run_test(self):
        """
        Bring up two (disconnected) nodes, mine some new blocks on the first,

@@ -157,6 +165,21 @@ class AssumeutxoTest(BitcoinTestFramework):
        self.log.info("Backup from before the snapshot height can't be loaded during background sync")
        assert_raises_rpc_error(-4, "Wallet loading failed. Error loading wallet. Wallet requires blocks to be downloaded, and software does not currently support loading wallets while blocks are being downloaded out of order when using assumeutxo snapshots. Wallet should be able to load successfully after node sync reaches height 299", n1.restorewallet, "w2", "backup_w2.dat")

        self.log.info("Test loading descriptors during background sync")
        wallet_name = "w1"
        n1.createwallet(wallet_name, disable_private_keys=True)
        key = get_generate_key()
        time = n1.getblockchaininfo()['time']
        timestamp = 0
        expected_error_message = f"Rescan failed for descriptor with timestamp {timestamp}. There was an error reading a block from time {time}, which is after or within 7200 seconds of key creation, and could contain transactions pertaining to the desc. As a result, transactions and coins using this desc may not appear in the wallet. This error is likely caused by an in-progress assumeutxo background sync. Check logs or getchainstates RPC for assumeutxo background sync progress and try again later."
        result = self.import_descriptor(n1, wallet_name, key, timestamp)
        assert_equal(result[0]['error']['code'], -1)
        assert_equal(result[0]['error']['message'], expected_error_message)

        self.log.info("Test that rescanning blocks from before the snapshot fails when blocks are not available from the background sync yet")
        w1 = n1.get_wallet_rpc(wallet_name)
        assert_raises_rpc_error(-1, "Failed to rescan unavailable blocks likely due to an in-progress assumeutxo background sync. Check logs or getchainstates RPC for assumeutxo background sync progress and try again later.", w1.rescanblockchain, 100)

        PAUSE_HEIGHT = FINAL_HEIGHT - 40

        self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT)

@@ -204,6 +227,11 @@ class AssumeutxoTest(BitcoinTestFramework):
        self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1)
        ensure_for(duration=1, f=lambda: (n2.getbalance() == 34))

        self.log.info("Ensuring descriptors can be loaded after background sync")
        n1.loadwallet(wallet_name)
        result = self.import_descriptor(n1, wallet_name, key, timestamp)
        assert_equal(result[0]['success'], True)


if __name__ == '__main__':
    AssumeutxoTest(__file__).main()