Mirror of https://github.com/bitcoin/bitcoin.git, synced 2025-03-06 14:19:59 -05:00
Compare commits: 36 commits, 2fb98386fe...226f6477bc
Commits: 226f6477bc, 85f96b01b7, 601a6a6917, eaf4b928e7, 992f37f2e1, 2f9c381fd9, 8fa10edcd1, 809d7e763c, 6835e9686c, c7869cb214, 1e0c5bd74a, 1d6c6e98c1, 152a2dcdef, e1676b08f7, 0082f6acc1, 79d45b10f1, 0713548137, 93747d934b, 551a09486c, 0cdddeb224, 7fbb1bc44b, 57ba59c0cd, bbac17608d, 04b848e482, 0c4954ac7d, 8888ee4403, 9d2d9f7ce2, 595edee169, d73ae603d4, 27f99b6d63, 42d5d53363, e87429a2d0, d45eb3964f, 01ddd9f646, cddcbaf81e, 4f4cd35319
35 changed files with 417 additions and 168 deletions
Changed paths:
.github/workflows
ci/test
doc
src
    bitcoin-chainstate.cpp, init.cpp
    interfaces
    ipc
    kernel
    net.cpp, net_processing.cpp, node
        blockmanager_args.cpp, blockstorage.cpp, chainstate.cpp, chainstate.h, chainstatemanager_args.cpp, interfaces.cpp, miner.cpp, miner.h
    rpc
    test
    validation.cpp, wallet/rpc
test/functional
.github/workflows/ci.yml (vendored): 12 changed lines
@@ -270,16 +270,16 @@ jobs:
timeout-minutes: 120
env:
FILE_ENV: "./ci/test/00_setup_env_native_asan.sh"
DANGER_CI_ON_HOST_CACHE_FOLDERS: 1
DANGER_CI_ON_HOST_FOLDERS: 1
steps:
- name: Checkout
uses: actions/checkout@v4

- name: Set Ccache directory
run: echo "CCACHE_DIR=${RUNNER_TEMP}/ccache_dir" >> "$GITHUB_ENV"

- name: Set base root directory
run: echo "BASE_ROOT_DIR=${RUNNER_TEMP}" >> "$GITHUB_ENV"
- name: Set CI directories
run: |
echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> "$GITHUB_ENV"
echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV"
echo "BASE_BUILD_DIR=${{ runner.temp }}/build-asan" >> "$GITHUB_ENV"

- name: Restore Ccache cache
id: ccache-cache
ci/test/02_run_container.sh:
@@ -25,6 +25,29 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
fi
echo "Creating $CI_IMAGE_NAME_TAG container to run in"

DOCKER_BUILD_CACHE_ARG=""
DOCKER_BUILD_CACHE_TEMPDIR=""
DOCKER_BUILD_CACHE_OLD_DIR=""
DOCKER_BUILD_CACHE_NEW_DIR=""
# If set, use an `docker build` cache directory on the CI host
# to cache docker image layers for the CI container image.
# This cache can be multiple GB in size. Prefixed with DANGER
# as setting it removes (old cache) files from the host.
if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then
# Directory where the current cache for this run could be. If not existing
# or empty, "docker build" will warn, but treat it as cache-miss and continue.
DOCKER_BUILD_CACHE_OLD_DIR="${DANGER_DOCKER_BUILD_CACHE_HOST_DIR}/${CONTAINER_NAME}"
# Temporary directory for a newly created cache. We can't write the new
# cache into OLD_DIR directly, as old cache layers would not be removed.
# The NEW_DIR contents are moved to OLD_DIR after OLD_DIR has been cleared.
# This happens after `docker build`. If a task fails or is aborted, the
# DOCKER_BUILD_CACHE_TEMPDIR might be retained on the host. If the host isn't
# ephemeral, it has to take care of cleaning old TEMPDIR's up.
DOCKER_BUILD_CACHE_TEMPDIR="$(mktemp --directory ci-docker-build-cache-XXXXXXXXXX)"
DOCKER_BUILD_CACHE_NEW_DIR="${DOCKER_BUILD_CACHE_TEMPDIR}/${CONTAINER_NAME}"
DOCKER_BUILD_CACHE_ARG="--cache-from type=local,src=${DOCKER_BUILD_CACHE_OLD_DIR} --cache-to type=local,dest=${DOCKER_BUILD_CACHE_NEW_DIR},mode=max"
fi

# shellcheck disable=SC2086
DOCKER_BUILDKIT=1 docker build \
--file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \

@@ -34,8 +57,18 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
--platform="${CI_IMAGE_PLATFORM}" \
--label="${CI_IMAGE_LABEL}" \
--tag="${CONTAINER_NAME}" \
$DOCKER_BUILD_CACHE_ARG \
"${BASE_READ_ONLY_DIR}"

if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then
if [ -e "${DOCKER_BUILD_CACHE_NEW_DIR}/index.json" ]; then
echo "Removing the existing docker build cache in ${DOCKER_BUILD_CACHE_OLD_DIR}"
rm -rf "${DOCKER_BUILD_CACHE_OLD_DIR}"
echo "Moving the contents of ${DOCKER_BUILD_CACHE_NEW_DIR} to ${DOCKER_BUILD_CACHE_OLD_DIR}"
mv "${DOCKER_BUILD_CACHE_NEW_DIR}" "${DOCKER_BUILD_CACHE_OLD_DIR}"
fi
fi

docker volume create "${CONTAINER_NAME}_ccache" || true
docker volume create "${CONTAINER_NAME}_depends" || true
docker volume create "${CONTAINER_NAME}_depends_sources" || true

@@ -45,18 +78,21 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
CI_DEPENDS_MOUNT="type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR/built"
CI_DEPENDS_SOURCES_MOUNT="type=volume,src=${CONTAINER_NAME}_depends_sources,dst=$DEPENDS_DIR/sources"
CI_PREVIOUS_RELEASES_MOUNT="type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR"
CI_BUILD_MOUNT=""

if [ "$DANGER_CI_ON_HOST_CACHE_FOLDERS" ]; then
if [ "$DANGER_CI_ON_HOST_FOLDERS" ]; then
# ensure the directories exist
mkdir -p "${CCACHE_DIR}"
mkdir -p "${DEPENDS_DIR}/built"
mkdir -p "${DEPENDS_DIR}/sources"
mkdir -p "${PREVIOUS_RELEASES_DIR}"
mkdir -p "${BASE_BUILD_DIR}" # Unset by default, must be defined externally

CI_CCACHE_MOUNT="type=bind,src=${CCACHE_DIR},dst=$CCACHE_DIR"
CI_DEPENDS_MOUNT="type=bind,src=${DEPENDS_DIR}/built,dst=$DEPENDS_DIR/built"
CI_DEPENDS_SOURCES_MOUNT="type=bind,src=${DEPENDS_DIR}/sources,dst=$DEPENDS_DIR/sources"
CI_PREVIOUS_RELEASES_MOUNT="type=bind,src=${PREVIOUS_RELEASES_DIR},dst=$PREVIOUS_RELEASES_DIR"
CI_BUILD_MOUNT="--mount type=bind,src=${BASE_BUILD_DIR},dst=${BASE_BUILD_DIR}"
fi

if [ "$DANGER_CI_ON_HOST_CCACHE_FOLDER" ]; then

@@ -98,6 +134,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
--mount "${CI_DEPENDS_MOUNT}" \
--mount "${CI_DEPENDS_SOURCES_MOUNT}" \
--mount "${CI_PREVIOUS_RELEASES_MOUNT}" \
${CI_BUILD_MOUNT} \
--env-file /tmp/env-$USER-$CONTAINER_NAME \
--name "$CONTAINER_NAME" \
--network ci-ip6net \
doc/release-notes-31600.md (new file, 11 lines)
@@ -0,0 +1,11 @@
Updated RPCs
---
- the `getblocktemplate` RPC `curtime` (BIP22) and `mintime` (BIP23) fields now
  account for the timewarp fix proposed in BIP94 on all networks. This ensures
  that, in the event a timewarp fix softfork activates on mainnet, un-upgraded
  miners will not accidentally violate the timewarp rule. (#31376, #31600)

As a reminder, it's important that any software which uses the `getblocktemplate`
RPC takes these values into account (either `curtime` or `mintime` is fine).
Relying only on a clock can lead to invalid blocks under some circumstances,
especially once a timewarp fix is deployed.
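For pool or miner software consuming this RPC, the recommendation above amounts to clamping the chosen block timestamp by the template fields instead of trusting the wall clock alone. A minimal sketch, assuming a generic `rpc` proxy object whose `getblocktemplate` method issues the JSON-RPC call (the helper name is illustrative, not part of this changeset):

    import time

    def choose_block_time(rpc):
        """Pick an nTime for the next block using the template fields,
        as recommended in the release note above."""
        tmpl = rpc.getblocktemplate({"rules": ["segwit"]})
        # curtime already accounts for the (proposed) BIP94 timewarp adjustment.
        n_time = max(int(time.time()), tmpl["curtime"])
        # Never go below mintime, e.g. if the local clock lags badly.
        return max(n_time, tmpl["mintime"])

Either field is sufficient on its own; the sketch uses both only to show where each bound applies.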
src/bitcoin-chainstate.cpp:
@@ -106,6 +106,7 @@ int main(int argc, char* argv[])
};
auto notifications = std::make_unique<KernelNotifications>();

kernel::CacheSizes cache_sizes{DEFAULT_KERNEL_CACHE};

// SETUP: Chainstate
auto chainparams = CChainParams::Main();

@@ -119,11 +120,14 @@ int main(int argc, char* argv[])
.chainparams = chainman_opts.chainparams,
.blocks_dir = abs_datadir / "blocks",
.notifications = chainman_opts.notifications,
.block_tree_db_params = DBParams{
.path = abs_datadir / "blocks" / "index",
.cache_bytes = cache_sizes.block_tree_db,
},
};
util::SignalInterrupt interrupt;
ChainstateManager chainman{interrupt, chainman_opts, blockman_opts};

kernel::CacheSizes cache_sizes{DEFAULT_KERNEL_CACHE};
node::ChainstateLoadOptions options;
auto [status, error] = node::LoadChainstate(chainman, cache_sizes, options);
if (status != node::ChainstateLoadStatus::SUCCESS) {
src/init.cpp (20 changed lines)
@@ -1057,6 +1057,10 @@ bool AppInitParameterInteraction(const ArgsManager& args)
.chainparams = chainman_opts_dummy.chainparams,
.blocks_dir = args.GetBlocksDirPath(),
.notifications = chainman_opts_dummy.notifications,
.block_tree_db_params = DBParams{
.path = args.GetDataDirNet() / "blocks" / "index",
.cache_bytes = 0,
},
};
auto blockman_result{ApplyArgsManOptions(args, blockman_opts_dummy)};
if (!blockman_result) {

@@ -1203,18 +1207,33 @@ static ChainstateLoadResult InitAndLoadChainstate(
.signals = node.validation_signals.get(),
};
Assert(ApplyArgsManOptions(args, chainman_opts)); // no error can happen, already checked in AppInitParameterInteraction

BlockManager::Options blockman_opts{
.chainparams = chainman_opts.chainparams,
.blocks_dir = args.GetBlocksDirPath(),
.notifications = chainman_opts.notifications,
.block_tree_db_params = DBParams{
.path = args.GetDataDirNet() / "blocks" / "index",
.cache_bytes = cache_sizes.block_tree_db,
.wipe_data = do_reindex,
},
};
Assert(ApplyArgsManOptions(args, blockman_opts)); // no error can happen, already checked in AppInitParameterInteraction

// Creating the chainstate manager internally creates a BlockManager, opens
// the blocks tree db, and wipes existing block files in case of a reindex.
// The coinsdb is opened at a later point on LoadChainstate.
try {
node.chainman = std::make_unique<ChainstateManager>(*Assert(node.shutdown_signal), chainman_opts, blockman_opts);
} catch (dbwrapper_error& e) {
LogError("%s", e.what());
return {ChainstateLoadStatus::FAILURE, _("Error opening block database")};
} catch (std::exception& e) {
return {ChainstateLoadStatus::FAILURE_FATAL, Untranslated(strprintf("Failed to initialize ChainstateManager: %s", e.what()))};
}
ChainstateManager& chainman = *node.chainman;
if (chainman.m_interrupt) return {ChainstateLoadStatus::INTERRUPTED, {}};

// This is defined and set here instead of inline in validation.h to avoid a hard
// dependency between validation and index/base, since the latter is not in
// libbitcoinkernel.

@@ -1237,7 +1256,6 @@ static ChainstateLoadResult InitAndLoadChainstate(
};
node::ChainstateLoadOptions options;
options.mempool = Assert(node.mempool.get());
options.wipe_block_tree_db = do_reindex;
options.wipe_chainstate_db = do_reindex || do_reindex_chainstate;
options.prune = chainman.m_blockman.IsPruneMode();
options.check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
src/interfaces/chain.h:
@@ -289,6 +289,9 @@ public:
//! Check if any block has been pruned.
virtual bool havePruned() = 0;

//! Get the current prune height.
virtual std::optional<int> getPruneHeight() = 0;

//! Check if the node is ready to broadcast transactions.
virtual bool isReadyToBroadcast() = 0;
src/ipc/CMakeLists.txt:
@@ -11,9 +11,7 @@ if (NOT WITH_LIBMULTIPROCESS)
set(MP_INCLUDE_DIR "${MP_INCLUDE_DIR}" PARENT_SCOPE)
# Add mptest unit tests to "all" target so ctest can run them
set_target_properties(mptest PROPERTIES EXCLUDE_FROM_ALL OFF)

# Add examples to "all" target so clang-tidy can check them
set_target_properties(mpexamples PROPERTIES EXCLUDE_FROM_ALL OFF)
set_target_properties(mpcalculator mpprinter mpexample PROPERTIES EXPORT_COMPILE_COMMANDS OFF)
endif()

add_library(bitcoin_ipc STATIC EXCLUDE_FROM_ALL
src/kernel/blockmanager_opts.h:
@@ -5,6 +5,7 @@
#ifndef BITCOIN_KERNEL_BLOCKMANAGER_OPTS_H
#define BITCOIN_KERNEL_BLOCKMANAGER_OPTS_H

#include <dbwrapper.h>
#include <kernel/notifications_interface.h>
#include <util/fs.h>

@@ -27,6 +28,7 @@ struct BlockManagerOpts {
bool fast_prune{false};
const fs::path blocks_dir;
Notifications& notifications;
DBParams block_tree_db_params;
};

} // namespace kernel
src/kernel/chainstatemanager_opts.h:
@@ -42,7 +42,6 @@ struct ChainstateManagerOpts {
std::optional<uint256> assumed_valid_block{};
//! If the tip is older than this, the node is considered to be in initial block download.
std::chrono::seconds max_tip_age{DEFAULT_MAX_TIP_AGE};
DBOptions block_tree_db{};
DBOptions coins_db{};
CoinsViewOptions coins_view{};
Notifications& notifications;
src/net.cpp (11 changed lines)
@@ -558,6 +558,7 @@ void CNode::CloseSocketDisconnect()
fDisconnect = true;
LOCK(m_sock_mutex);
if (m_sock) {
LogDebug(BCLog::NET, "Resetting socket for peer=%d%s", GetId(), LogIP(fLogIPs));
m_sock.reset();
}
m_i2p_sam_session.reset();

@@ -1706,7 +1707,7 @@ bool CConnman::AttemptToEvictConnection()
LOCK(m_nodes_mutex);
for (CNode* pnode : m_nodes) {
if (pnode->GetId() == *node_id_to_evict) {
LogDebug(BCLog::NET, "selected %s connection for eviction peer=%d; disconnecting\n", pnode->ConnectionTypeAsString(), pnode->GetId());
LogDebug(BCLog::NET, "selected %s connection for eviction, %s", pnode->ConnectionTypeAsString(), pnode->DisconnectMsg(fLogIPs));
pnode->fDisconnect = true;
return true;
}

@@ -3443,7 +3444,7 @@ void CConnman::StopNodes()
std::vector<CNode*> nodes;
WITH_LOCK(m_nodes_mutex, nodes.swap(m_nodes));
for (CNode* pnode : nodes) {
LogDebug(BCLog::NET, "%s\n", pnode->DisconnectMsg(fLogIPs));
LogDebug(BCLog::NET, "Stopping node, %s", pnode->DisconnectMsg(fLogIPs));
pnode->CloseSocketDisconnect();
DeleteNode(pnode);
}

@@ -3607,7 +3608,7 @@ bool CConnman::DisconnectNode(const std::string& strNode)
{
LOCK(m_nodes_mutex);
if (CNode* pnode = FindNode(strNode)) {
LogDebug(BCLog::NET, "disconnect by address%s matched peer=%d; disconnecting\n", (fLogIPs ? strprintf("=%s", strNode) : ""), pnode->GetId());
LogDebug(BCLog::NET, "disconnect by address%s match, %s", (fLogIPs ? strprintf("=%s", strNode) : ""), pnode->DisconnectMsg(fLogIPs));
pnode->fDisconnect = true;
return true;
}

@@ -3620,7 +3621,7 @@ bool CConnman::DisconnectNode(const CSubNet& subnet)
LOCK(m_nodes_mutex);
for (CNode* pnode : m_nodes) {
if (subnet.Match(pnode->addr)) {
LogDebug(BCLog::NET, "disconnect by subnet%s matched peer=%d; disconnecting\n", (fLogIPs ? strprintf("=%s", subnet.ToString()) : ""), pnode->GetId());
LogDebug(BCLog::NET, "disconnect by subnet%s match, %s", (fLogIPs ? strprintf("=%s", subnet.ToString()) : ""), pnode->DisconnectMsg(fLogIPs));
pnode->fDisconnect = true;
disconnected = true;
}

@@ -3638,7 +3639,7 @@ bool CConnman::DisconnectNode(NodeId id)
LOCK(m_nodes_mutex);
for(CNode* pnode : m_nodes) {
if (id == pnode->GetId()) {
LogDebug(BCLog::NET, "disconnect by id peer=%d; disconnecting\n", pnode->GetId());
LogDebug(BCLog::NET, "disconnect by id, %s", pnode->DisconnectMsg(fLogIPs));
pnode->fDisconnect = true;
return true;
}
src/net_processing.cpp:
@@ -4205,7 +4205,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,

if (msg_type == NetMsgType::TX) {
if (RejectIncomingTxs(pfrom)) {
LogDebug(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
LogDebug(BCLog::NET, "transaction sent in violation of protocol, %s", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}

@@ -5207,7 +5207,7 @@ void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::mic
{
// The ping timeout is using mocktime. To disable the check during
// testing, increase -peertimeout.
LogDebug(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id);
LogDebug(BCLog::NET, "ping timeout: %fs, %s", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), node_to.DisconnectMsg(fLogIPs));
node_to.fDisconnect = true;
return;
}
src/node/blockmanager_args.cpp:
@@ -6,6 +6,7 @@

#include <common/args.h>
#include <node/blockstorage.h>
#include <node/database_args.h>
#include <tinyformat.h>
#include <util/result.h>
#include <util/translation.h>

@@ -34,6 +35,8 @@ util::Result<void> ApplyArgsManOptions(const ArgsManager& args, BlockManager::Op

if (auto value{args.GetBoolArg("-fastprune")}) opts.fast_prune = *value;

ReadDatabaseArgs(args, opts.block_tree_db_params.options);

return {};
}
} // namespace node
src/node/blockstorage.cpp:
@@ -36,6 +36,7 @@
#include <util/translation.h>
#include <validation.h>

#include <cstddef>
#include <map>
#include <ranges>
#include <unordered_map>

@@ -1169,7 +1170,19 @@ BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
m_opts{std::move(opts)},
m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
m_interrupt{interrupt} {}
m_interrupt{interrupt}
{
m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params);

if (m_opts.block_tree_db_params.wipe_data) {
m_block_tree_db->WriteReindexing(true);
m_blockfiles_indexed = false;
// If we're reindexing in prune mode, wipe away unusable block files and all undo data files
if (m_prune_mode) {
CleanupBlockRevFiles();
}
}
}

class ImportingNow
{
|
@ -23,10 +23,7 @@
|
|||
#include <validation.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <cassert>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
using kernel::CacheSizes;
|
||||
|
@ -36,34 +33,8 @@ namespace node {
|
|||
// to ChainstateManager::InitializeChainstate().
|
||||
static ChainstateLoadResult CompleteChainstateInitialization(
|
||||
ChainstateManager& chainman,
|
||||
const CacheSizes& cache_sizes,
|
||||
const ChainstateLoadOptions& options) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
|
||||
{
|
||||
auto& pblocktree{chainman.m_blockman.m_block_tree_db};
|
||||
// new BlockTreeDB tries to delete the existing file, which
|
||||
// fails if it's still open from the previous loop. Close it first:
|
||||
pblocktree.reset();
|
||||
try {
|
||||
pblocktree = std::make_unique<BlockTreeDB>(DBParams{
|
||||
.path = chainman.m_options.datadir / "blocks" / "index",
|
||||
.cache_bytes = cache_sizes.block_tree_db,
|
||||
.memory_only = options.block_tree_db_in_memory,
|
||||
.wipe_data = options.wipe_block_tree_db,
|
||||
.options = chainman.m_options.block_tree_db});
|
||||
} catch (dbwrapper_error& err) {
|
||||
LogError("%s\n", err.what());
|
||||
return {ChainstateLoadStatus::FAILURE, _("Error opening block database")};
|
||||
}
|
||||
|
||||
if (options.wipe_block_tree_db) {
|
||||
pblocktree->WriteReindexing(true);
|
||||
chainman.m_blockman.m_blockfiles_indexed = false;
|
||||
//If we're reindexing in prune mode, wipe away unusable block files and all undo data files
|
||||
if (options.prune) {
|
||||
chainman.m_blockman.CleanupBlockRevFiles();
|
||||
}
|
||||
}
|
||||
|
||||
if (chainman.m_interrupt) return {ChainstateLoadStatus::INTERRUPTED, {}};
|
||||
|
||||
// LoadBlockIndex will load m_have_pruned if we've ever removed a
|
||||
|
@ -155,14 +126,12 @@ static ChainstateLoadResult CompleteChainstateInitialization(
|
|||
}
|
||||
}
|
||||
|
||||
if (!options.wipe_block_tree_db) {
|
||||
auto chainstates{chainman.GetAll()};
|
||||
if (std::any_of(chainstates.begin(), chainstates.end(),
|
||||
[](const Chainstate* cs) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return cs->NeedsRedownload(); })) {
|
||||
return {ChainstateLoadStatus::FAILURE, strprintf(_("Witness data for blocks after height %d requires validation. Please restart with -reindex."),
|
||||
chainman.GetConsensus().SegwitHeight)};
|
||||
};
|
||||
}
|
||||
auto chainstates{chainman.GetAll()};
|
||||
if (std::any_of(chainstates.begin(), chainstates.end(),
|
||||
[](const Chainstate* cs) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return cs->NeedsRedownload(); })) {
|
||||
return {ChainstateLoadStatus::FAILURE, strprintf(_("Witness data for blocks after height %d requires validation. Please restart with -reindex."),
|
||||
chainman.GetConsensus().SegwitHeight)};
|
||||
};
|
||||
|
||||
// Now that chainstates are loaded and we're able to flush to
|
||||
// disk, rebalance the coins caches to desired levels based
|
||||
|
@ -208,7 +177,7 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
|
|||
}
|
||||
}
|
||||
|
||||
auto [init_status, init_error] = CompleteChainstateInitialization(chainman, cache_sizes, options);
|
||||
auto [init_status, init_error] = CompleteChainstateInitialization(chainman, options);
|
||||
if (init_status != ChainstateLoadStatus::SUCCESS) {
|
||||
return {init_status, init_error};
|
||||
}
|
||||
|
@ -244,7 +213,7 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
|
|||
// for the fully validated chainstate.
|
||||
chainman.ActiveChainstate().ClearBlockIndexCandidates();
|
||||
|
||||
auto [init_status, init_error] = CompleteChainstateInitialization(chainman, cache_sizes, options);
|
||||
auto [init_status, init_error] = CompleteChainstateInitialization(chainman, options);
|
||||
if (init_status != ChainstateLoadStatus::SUCCESS) {
|
||||
return {init_status, init_error};
|
||||
}
|
||||
|
|
|
@ -22,12 +22,7 @@ namespace node {
|
|||
|
||||
struct ChainstateLoadOptions {
|
||||
CTxMemPool* mempool{nullptr};
|
||||
bool block_tree_db_in_memory{false};
|
||||
bool coins_db_in_memory{false};
|
||||
// Whether to wipe the block tree database when loading it. If set, this
|
||||
// will also set a reindexing flag so any existing block data files will be
|
||||
// scanned and added to the database.
|
||||
bool wipe_block_tree_db{false};
|
||||
// Whether to wipe the chainstate database when loading it. If set, this
|
||||
// will cause the chainstate database to be rebuilt starting from genesis.
|
||||
bool wipe_chainstate_db{false};
|
||||
|
|
|
@ -49,7 +49,6 @@ util::Result<void> ApplyArgsManOptions(const ArgsManager& args, ChainstateManage
|
|||
|
||||
if (auto value{args.GetIntArg("-maxtipage")}) opts.max_tip_age = std::chrono::seconds{*value};
|
||||
|
||||
ReadDatabaseArgs(args, opts.block_tree_db);
|
||||
ReadDatabaseArgs(args, opts.coins_db);
|
||||
ReadCoinsViewArgs(args, opts.coins_view);
|
||||
|
||||
|
|
|
@ -46,6 +46,7 @@
|
|||
#include <policy/settings.h>
|
||||
#include <primitives/block.h>
|
||||
#include <primitives/transaction.h>
|
||||
#include <rpc/blockchain.h>
|
||||
#include <rpc/protocol.h>
|
||||
#include <rpc/server.h>
|
||||
#include <support/allocators/secure.h>
|
||||
|
@ -770,6 +771,11 @@ public:
|
|||
LOCK(::cs_main);
|
||||
return chainman().m_blockman.m_have_pruned;
|
||||
}
|
||||
std::optional<int> getPruneHeight() override
|
||||
{
|
||||
LOCK(chainman().GetMutex());
|
||||
return GetPruneHeight(chainman().m_blockman, chainman().ActiveChain());
|
||||
}
|
||||
bool isReadyToBroadcast() override { return !chainman().m_blockman.LoadingBlocks() && !isInitialBlockDownload(); }
|
||||
bool isInitialBlockDownload() override
|
||||
{
|
||||
|
|
|
src/node/miner.cpp:
@@ -28,16 +28,25 @@
#include <utility>

namespace node {

int64_t GetMinimumTime(const CBlockIndex* pindexPrev, const int64_t difficulty_adjustment_interval)
{
int64_t min_time{pindexPrev->GetMedianTimePast() + 1};
// Height of block to be mined.
const int height{pindexPrev->nHeight + 1};
// Account for BIP94 timewarp rule on all networks. This makes future
// activation safer.
if (height % difficulty_adjustment_interval == 0) {
min_time = std::max<int64_t>(min_time, pindexPrev->GetBlockTime() - MAX_TIMEWARP);
}
return min_time;
}

int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
int64_t nOldTime = pblock->nTime;
int64_t nNewTime{std::max<int64_t>(pindexPrev->GetMedianTimePast() + 1, TicksSinceEpoch<std::chrono::seconds>(NodeClock::now()))};

// Height of block to be mined.
const int height{pindexPrev->nHeight + 1};
if (height % consensusParams.DifficultyAdjustmentInterval() == 0) {
nNewTime = std::max<int64_t>(nNewTime, pindexPrev->GetBlockTime() - MAX_TIMEWARP);
}
int64_t nNewTime{std::max<int64_t>(GetMinimumTime(pindexPrev, consensusParams.DifficultyAdjustmentInterval()),
TicksSinceEpoch<std::chrono::seconds>(NodeClock::now()))};

if (nOldTime < nNewTime) {
pblock->nTime = nNewTime;
|
|||
void SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries);
|
||||
};
|
||||
|
||||
/**
|
||||
* Get the minimum time a miner should use in the next block. This always
|
||||
* accounts for the BIP94 timewarp rule, so does not necessarily reflect the
|
||||
* consensus limit.
|
||||
*/
|
||||
int64_t GetMinimumTime(const CBlockIndex* pindexPrev, const int64_t difficulty_adjustment_interval);
|
||||
|
||||
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev);
|
||||
|
||||
/** Update an old GenerateCoinbaseCommitment from CreateNewBlock after the block txs have changed */
|
||||
|
|
|
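The doc comment above describes the rule GetMinimumTime() enforces. Re-stated as a small Python sketch for clarity (mirroring the C++ in node/miner.cpp shown earlier; MAX_TIMEWARP is treated as an opaque constant defined in the consensus headers):

    def get_minimum_time(prev_mtp, prev_block_time, prev_height,
                         difficulty_adjustment_interval, max_timewarp):
        """Lower bound for the next block's timestamp: at least median-time-past + 1,
        and at a difficulty-retarget boundary no more than max_timewarp seconds
        before the previous block's timestamp (the BIP94 timewarp rule)."""
        min_time = prev_mtp + 1
        height = prev_height + 1  # height of the block being mined
        if height % difficulty_adjustment_interval == 0:
            min_time = max(min_time, prev_block_time - max_timewarp)
        return min_time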
src/rpc/mining.cpp:
@@ -49,6 +49,7 @@
using interfaces::BlockTemplate;
using interfaces::Mining;
using node::BlockAssembler;
using node::GetMinimumTime;
using node::NodeContext;
using node::RegenerateCommitments;
using node::UpdateTime;

@@ -674,7 +675,7 @@ static RPCHelpMan getblocktemplate()
{RPCResult::Type::NUM, "coinbasevalue", "maximum allowable input to coinbase transaction, including the generation award and transaction fees (in satoshis)"},
{RPCResult::Type::STR, "longpollid", "an id to include with a request to longpoll on an update to this template"},
{RPCResult::Type::STR, "target", "The hash target"},
{RPCResult::Type::NUM_TIME, "mintime", "The minimum timestamp appropriate for the next block time, expressed in " + UNIX_EPOCH_TIME},
{RPCResult::Type::NUM_TIME, "mintime", "The minimum timestamp appropriate for the next block time, expressed in " + UNIX_EPOCH_TIME + ". Adjusted for the proposed BIP94 timewarp rule."},
{RPCResult::Type::ARR, "mutable", "list of ways the block template may be changed",
{
{RPCResult::Type::STR, "value", "A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'"},

@@ -683,7 +684,7 @@ static RPCHelpMan getblocktemplate()
{RPCResult::Type::NUM, "sigoplimit", "limit of sigops in blocks"},
{RPCResult::Type::NUM, "sizelimit", "limit of block size"},
{RPCResult::Type::NUM, "weightlimit", /*optional=*/true, "limit of block weight"},
{RPCResult::Type::NUM_TIME, "curtime", "current timestamp in " + UNIX_EPOCH_TIME},
{RPCResult::Type::NUM_TIME, "curtime", "current timestamp in " + UNIX_EPOCH_TIME + ". Adjusted for the proposed BIP94 timewarp rule."},
{RPCResult::Type::STR, "bits", "compressed target of next block"},
{RPCResult::Type::NUM, "height", "The height of the next block"},
{RPCResult::Type::STR_HEX, "signet_challenge", /*optional=*/true, "Only on signet"},

@@ -977,7 +978,7 @@ static RPCHelpMan getblocktemplate()
result.pushKV("coinbasevalue", (int64_t)block.vtx[0]->vout[0].nValue);
result.pushKV("longpollid", tip.GetHex() + ToString(nTransactionsUpdatedLast));
result.pushKV("target", hashTarget.GetHex());
result.pushKV("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1);
result.pushKV("mintime", GetMinimumTime(pindexPrev, consensusParams.DifficultyAdjustmentInterval()));
result.pushKV("mutable", std::move(aMutable));
result.pushKV("noncerange", "00000000ffffffff");
int64_t nSigOpLimit = MAX_BLOCK_SIGOPS_COST;
|
@ -33,6 +33,10 @@ BOOST_AUTO_TEST_CASE(blockmanager_find_block_pos)
|
|||
.chainparams = *params,
|
||||
.blocks_dir = m_args.GetBlocksDirPath(),
|
||||
.notifications = notifications,
|
||||
.block_tree_db_params = DBParams{
|
||||
.path = m_args.GetDataDirNet() / "blocks" / "index",
|
||||
.cache_bytes = 0,
|
||||
},
|
||||
};
|
||||
BlockManager blockman{*Assert(m_node.shutdown_signal), blockman_opts};
|
||||
// simulate adding a genesis block normally
|
||||
|
@ -140,6 +144,10 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file)
|
|||
.chainparams = Params(),
|
||||
.blocks_dir = m_args.GetBlocksDirPath(),
|
||||
.notifications = notifications,
|
||||
.block_tree_db_params = DBParams{
|
||||
.path = m_args.GetDataDirNet() / "blocks" / "index",
|
||||
.cache_bytes = 0,
|
||||
},
|
||||
};
|
||||
BlockManager blockman{*Assert(m_node.shutdown_signal), blockman_opts};
|
||||
|
||||
|
|
|
@ -62,7 +62,6 @@
|
|||
#include <stdexcept>
|
||||
|
||||
using namespace util::hex_literals;
|
||||
using kernel::BlockTreeDB;
|
||||
using node::ApplyArgsManOptions;
|
||||
using node::BlockAssembler;
|
||||
using node::BlockManager;
|
||||
|
@ -252,14 +251,14 @@ ChainTestingSetup::ChainTestingSetup(const ChainType chainType, TestOpts opts)
|
|||
.chainparams = chainman_opts.chainparams,
|
||||
.blocks_dir = m_args.GetBlocksDirPath(),
|
||||
.notifications = chainman_opts.notifications,
|
||||
.block_tree_db_params = DBParams{
|
||||
.path = m_args.GetDataDirNet() / "blocks" / "index",
|
||||
.cache_bytes = m_kernel_cache_sizes.block_tree_db,
|
||||
.memory_only = opts.block_tree_db_in_memory,
|
||||
.wipe_data = m_args.GetBoolArg("-reindex", false),
|
||||
},
|
||||
};
|
||||
m_node.chainman = std::make_unique<ChainstateManager>(*Assert(m_node.shutdown_signal), chainman_opts, blockman_opts);
|
||||
LOCK(m_node.chainman->GetMutex());
|
||||
m_node.chainman->m_blockman.m_block_tree_db = std::make_unique<BlockTreeDB>(DBParams{
|
||||
.path = m_args.GetDataDirNet() / "blocks" / "index",
|
||||
.cache_bytes = m_kernel_cache_sizes.block_tree_db,
|
||||
.memory_only = true,
|
||||
});
|
||||
};
|
||||
m_make_chainman();
|
||||
}
|
||||
|
@ -285,9 +284,7 @@ void ChainTestingSetup::LoadVerifyActivateChainstate()
|
|||
auto& chainman{*Assert(m_node.chainman)};
|
||||
node::ChainstateLoadOptions options;
|
||||
options.mempool = Assert(m_node.mempool.get());
|
||||
options.block_tree_db_in_memory = m_block_tree_db_in_memory;
|
||||
options.coins_db_in_memory = m_coins_db_in_memory;
|
||||
options.wipe_block_tree_db = m_args.GetBoolArg("-reindex", false);
|
||||
options.wipe_chainstate_db = m_args.GetBoolArg("-reindex", false) || m_args.GetBoolArg("-reindex-chainstate", false);
|
||||
options.prune = chainman.m_blockman.IsPruneMode();
|
||||
options.check_blocks = m_args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
|
||||
|
|
|
@ -393,6 +393,11 @@ struct SnapshotTestSetup : TestChain100Setup {
|
|||
.chainparams = chainman_opts.chainparams,
|
||||
.blocks_dir = m_args.GetBlocksDirPath(),
|
||||
.notifications = chainman_opts.notifications,
|
||||
.block_tree_db_params = DBParams{
|
||||
.path = chainman.m_options.datadir / "blocks" / "index",
|
||||
.cache_bytes = m_kernel_cache_sizes.block_tree_db,
|
||||
.memory_only = m_block_tree_db_in_memory,
|
||||
},
|
||||
};
|
||||
// For robustness, ensure the old manager is destroyed before creating a
|
||||
// new one.
|
||||
|
|
|
@ -5623,9 +5623,8 @@ double ChainstateManager::GuessVerificationProgress(const CBlockIndex* pindex) c
|
|||
return 0.0;
|
||||
}
|
||||
|
||||
if (!Assume(pindex->m_chain_tx_count > 0)) {
|
||||
LogWarning("Internal bug detected: block %d has unset m_chain_tx_count (%s %s). Please report this issue here: %s\n",
|
||||
pindex->nHeight, CLIENT_NAME, FormatFullVersion(), CLIENT_BUGREPORT);
|
||||
if (pindex->m_chain_tx_count == 0) {
|
||||
LogDebug(BCLog::VALIDATION, "Block %d has unset m_chain_tx_count. Unable to estimate verification progress.\n", pindex->nHeight);
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1745,20 +1745,27 @@ RPCHelpMan importdescriptors()
|
|||
if (scanned_time <= GetImportTimestamp(request, now) || results.at(i).exists("error")) {
|
||||
response.push_back(results.at(i));
|
||||
} else {
|
||||
std::string error_msg{strprintf("Rescan failed for descriptor with timestamp %d. There "
|
||||
"was an error reading a block from time %d, which is after or within %d seconds "
|
||||
"of key creation, and could contain transactions pertaining to the desc. As a "
|
||||
"result, transactions and coins using this desc may not appear in the wallet.",
|
||||
GetImportTimestamp(request, now), scanned_time - TIMESTAMP_WINDOW - 1, TIMESTAMP_WINDOW)};
|
||||
if (pwallet->chain().havePruned()) {
|
||||
error_msg += strprintf(" This error could be caused by pruning or data corruption "
|
||||
"(see bitcoind log for details) and could be dealt with by downloading and "
|
||||
"rescanning the relevant blocks (see -reindex option and rescanblockchain RPC).");
|
||||
} else if (pwallet->chain().hasAssumedValidChain()) {
|
||||
error_msg += strprintf(" This error is likely caused by an in-progress assumeutxo "
|
||||
"background sync. Check logs or getchainstates RPC for assumeutxo background "
|
||||
"sync progress and try again later.");
|
||||
} else {
|
||||
error_msg += strprintf(" This error could potentially caused by data corruption. If "
|
||||
"the issue persists you may want to reindex (see -reindex option).");
|
||||
}
|
||||
|
||||
UniValue result = UniValue(UniValue::VOBJ);
|
||||
result.pushKV("success", UniValue(false));
|
||||
result.pushKV(
|
||||
"error",
|
||||
JSONRPCError(
|
||||
RPC_MISC_ERROR,
|
||||
strprintf("Rescan failed for descriptor with timestamp %d. There was an error reading a "
|
||||
"block from time %d, which is after or within %d seconds of key creation, and "
|
||||
"could contain transactions pertaining to the desc. As a result, transactions "
|
||||
"and coins using this desc may not appear in the wallet. This error could be "
|
||||
"caused by pruning or data corruption (see bitcoind log for details) and could "
|
||||
"be dealt with by downloading and rescanning the relevant blocks (see -reindex "
|
||||
"option and rescanblockchain RPC).",
|
||||
GetImportTimestamp(request, now), scanned_time - TIMESTAMP_WINDOW - 1, TIMESTAMP_WINDOW)));
|
||||
result.pushKV("error", JSONRPCError(RPC_MISC_ERROR, error_msg));
|
||||
response.push_back(std::move(result));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -68,29 +68,26 @@ std::set<int> InterpretSubtractFeeFromOutputInstructions(const UniValue& sffo_in
|
|||
{
|
||||
std::set<int> sffo_set;
|
||||
if (sffo_instructions.isNull()) return sffo_set;
|
||||
if (sffo_instructions.isBool()) {
|
||||
if (sffo_instructions.get_bool()) sffo_set.insert(0);
|
||||
return sffo_set;
|
||||
}
|
||||
|
||||
for (const auto& sffo : sffo_instructions.getValues()) {
|
||||
int pos{-1};
|
||||
if (sffo.isStr()) {
|
||||
for (size_t i = 0; i < destinations.size(); ++i) {
|
||||
if (sffo.get_str() == destinations.at(i)) {
|
||||
sffo_set.insert(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (sffo.isNum()) {
|
||||
int pos = sffo.getInt<int>();
|
||||
if (sffo_set.contains(pos))
|
||||
throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter, duplicated position: %d", pos));
|
||||
if (pos < 0)
|
||||
throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter, negative position: %d", pos));
|
||||
if (pos >= int(destinations.size()))
|
||||
throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter, position too large: %d", pos));
|
||||
sffo_set.insert(pos);
|
||||
auto it = find(destinations.begin(), destinations.end(), sffo.get_str());
|
||||
if (it == destinations.end()) throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter 'subtract fee from output', destination %s not found in tx outputs", sffo.get_str()));
|
||||
pos = it - destinations.begin();
|
||||
} else if (sffo.isNum()) {
|
||||
pos = sffo.getInt<int>();
|
||||
} else {
|
||||
throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter 'subtract fee from output', invalid value type: %s", uvTypeName(sffo.type())));
|
||||
}
|
||||
|
||||
if (sffo_set.contains(pos))
|
||||
throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter 'subtract fee from output', duplicated position: %d", pos));
|
||||
if (pos < 0)
|
||||
throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter 'subtract fee from output', negative position: %d", pos));
|
||||
if (pos >= int(destinations.size()))
|
||||
throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter 'subtract fee from output', position too large: %d", pos));
|
||||
sffo_set.insert(pos);
|
||||
}
|
||||
return sffo_set;
|
||||
}
|
||||
|
@ -310,10 +307,13 @@ RPCHelpMan sendtoaddress()
|
|||
UniValue address_amounts(UniValue::VOBJ);
|
||||
const std::string address = request.params[0].get_str();
|
||||
address_amounts.pushKV(address, request.params[1]);
|
||||
std::vector<CRecipient> recipients = CreateRecipients(
|
||||
ParseOutputs(address_amounts),
|
||||
InterpretSubtractFeeFromOutputInstructions(request.params[4], address_amounts.getKeys())
|
||||
);
|
||||
|
||||
std::set<int> sffo_set;
|
||||
if (!request.params[4].isNull() && request.params[4].get_bool()) {
|
||||
sffo_set.insert(0);
|
||||
}
|
||||
|
||||
std::vector<CRecipient> recipients{CreateRecipients(ParseOutputs(address_amounts), sffo_set)};
|
||||
const bool verbose{request.params[10].isNull() ? false : request.params[10].get_bool()};
|
||||
|
||||
return SendMoney(*pwallet, coin_control, recipients, mapValue, verbose);
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include <key_io.h>
|
||||
#include <policy/rbf.h>
|
||||
#include <rpc/util.h>
|
||||
#include <rpc/blockchain.h>
|
||||
#include <util/vector.h>
|
||||
#include <wallet/receive.h>
|
||||
#include <wallet/rpc/util.h>
|
||||
|
@ -909,9 +910,15 @@ RPCHelpMan rescanblockchain()
|
|||
}
|
||||
}
|
||||
|
||||
// We can't rescan beyond non-pruned blocks, stop and throw an error
|
||||
// We can't rescan unavailable blocks, stop and throw an error
|
||||
if (!pwallet->chain().hasBlocks(pwallet->GetLastBlockHash(), start_height, stop_height)) {
|
||||
throw JSONRPCError(RPC_MISC_ERROR, "Can't rescan beyond pruned data. Use RPC call getblockchaininfo to determine your pruned height.");
|
||||
if (pwallet->chain().havePruned() && pwallet->chain().getPruneHeight() >= start_height) {
|
||||
throw JSONRPCError(RPC_MISC_ERROR, "Can't rescan beyond pruned data. Use RPC call getblockchaininfo to determine your pruned height.");
|
||||
}
|
||||
if (pwallet->chain().hasAssumedValidChain()) {
|
||||
throw JSONRPCError(RPC_MISC_ERROR, "Failed to rescan unavailable blocks likely due to an in-progress assumeutxo background sync. Check logs or getchainstates RPC for assumeutxo background sync progress and try again later.");
|
||||
}
|
||||
throw JSONRPCError(RPC_MISC_ERROR, "Failed to rescan unavailable blocks, potentially caused by data corruption. If the issue persists you may want to reindex (see -reindex option).");
|
||||
}
|
||||
|
||||
CHECK_NONFATAL(pwallet->chain().findAncestorByHeight(pwallet->GetLastBlockHash(), start_height, FoundBlock().hash(start_block)));
|
||||
|
|
|
test/functional/mining_basic.py:
@@ -153,6 +153,8 @@ class MiningTest(BitcoinTestFramework):
# The template will have an adjusted timestamp, which we then modify
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
assert_greater_than_or_equal(tmpl['curtime'], t + MAX_FUTURE_BLOCK_TIME - MAX_TIMEWARP)
# mintime and curtime should match
assert_equal(tmpl['mintime'], tmpl['curtime'])

block = CBlock()
block.nVersion = tmpl["version"]
@ -144,6 +144,11 @@ class PackageRelayTest(BitcoinTestFramework):
|
|||
for tx in transactions_to_presend[i]:
|
||||
peer.send_and_ping(msg_tx(tx))
|
||||
|
||||
# Disconnect python peers to clear outstanding orphan requests with them, avoiding timeouts.
|
||||
# We are only interested in the syncing behavior between real nodes.
|
||||
for i in range(self.num_nodes):
|
||||
self.nodes[i].disconnect_p2ps()
|
||||
|
||||
self.log.info("Submit full packages to node0")
|
||||
for package_hex in packages_to_submit:
|
||||
submitpackage_result = self.nodes[0].submitpackage(package_hex)
|
||||
|
|
|
@ -115,7 +115,7 @@ class P2PBlocksOnly(BitcoinTestFramework):
|
|||
self.log.info('Check that txs from P2P are rejected and result in disconnect')
|
||||
spendtx = self.miniwallet.create_self_transfer()
|
||||
|
||||
with self.nodes[0].assert_debug_log(['transaction sent in violation of protocol peer=0']):
|
||||
with self.nodes[0].assert_debug_log(['transaction sent in violation of protocol, disconnecting peer=0']):
|
||||
self.nodes[0].p2ps[0].send_message(msg_tx(spendtx['tx']))
|
||||
self.nodes[0].p2ps[0].wait_for_disconnect()
|
||||
assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)
|
||||
|
|
|
@ -549,6 +549,7 @@ class BlockchainTest(BitcoinTestFramework):
|
|||
# The chain has probably already been restored by the time reconsiderblock returns,
|
||||
# but poll anyway.
|
||||
self.wait_until(lambda: node.waitfornewblock(timeout=100)['hash'] == current_hash)
|
||||
assert_raises_rpc_error(-1, "Negative timeout", node.waitfornewblock, -1)
|
||||
|
||||
def _test_waitforblockheight(self):
|
||||
self.log.info("Test waitforblockheight")
|
||||
|
|
|
test/functional/test_framework/bdb.py:
@@ -6,20 +6,18 @@
Utilities for working directly with the wallet's BDB database file

This is specific to the configuration of BDB used in this project:
- pagesize: 4096 bytes
- Outer database contains single subdatabase named 'main'
- btree
- btree leaf pages
- btree internal, leaf and overflow pages

Each key-value pair is two entries in a btree leaf. The first is the key, the one that follows
Each key-value pair is two entries in a btree leaf, which optionally refers to overflow pages
if the data doesn't fit into a single page. The first entry is the key, the one that follows
is the value. And so on. Note that the entry data is itself not in the correct order. Instead
entry offsets are stored in the correct order and those offsets are needed to then retrieve
the data itself.
the data itself. Note that this implementation currently only supports reading databases that
are in the same endianness as the host.

Page format can be found in BDB source code dbinc/db_page.h
This only implements the deserialization of btree metadata pages and normal btree pages. Overflow
pages are not implemented but may be needed in the future if dealing with wallets with large
transactions.

`db_dump -da wallet.dat` is useful to see the data in a wallet.dat BDB file
"""
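Every BDB page, regardless of type, starts with the same 26-byte header that the parser below unpacks. As a standalone illustration of that layout (field names follow the `dump_page` code further down; this helper itself is not part of the change):

    import struct

    PAGE_HEADER_SIZE = 26

    def read_page_header(page_bytes):
        """Decode the common BDB page header (layout per BDB dbinc/db_page.h)."""
        _lsn, pgno, prev_pgno, next_pgno, entries, hf_offset, level, pg_type = \
            struct.unpack('QIIIHHBB', page_bytes[:PAGE_HEADER_SIZE])
        return {
            'pgno': pgno,            # this page's number
            'prev_pgno': prev_pgno,  # previous page in a chain, 0 if none
            'next_pgno': next_pgno,  # next page in a chain, 0 if none
            'entries': entries,      # number of entries stored on the page
            'hf_offset': hf_offset,  # free-space offset bookkeeping for the payload
            'level': level,          # btree level (1 for leaf pages)
            'pg_type': pg_type,      # e.g. BTREE_META, BTREE_INTERNAL, BTREE_LEAF, OVERFLOW_DATA
        }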
@ -27,23 +25,36 @@ transactions.
|
|||
import struct
|
||||
|
||||
# Important constants
|
||||
PAGESIZE = 4096
|
||||
PAGE_HEADER_SIZE = 26
|
||||
OUTER_META_PAGE = 0
|
||||
INNER_META_PAGE = 2
|
||||
|
||||
# Page type values
|
||||
BTREE_INTERNAL = 3
|
||||
BTREE_LEAF = 5
|
||||
OVERFLOW_DATA = 7
|
||||
BTREE_META = 9
|
||||
|
||||
# Record type values
|
||||
RECORD_KEYDATA = 1
|
||||
RECORD_OVERFLOW_DATA = 3
|
||||
|
||||
# Some magic numbers for sanity checking
|
||||
BTREE_MAGIC = 0x053162
|
||||
DB_VERSION = 9
|
||||
SUBDATABASE_NAME = b'main'
|
||||
|
||||
# Deserializes a leaf page into a dict.
|
||||
# Btree internal pages have the same header, for those, return None.
|
||||
# For the btree leaf pages, deserialize them and put all the data into a dict
|
||||
def dump_leaf_page(data):
|
||||
# Deserializes an internal, leaf or overflow page into a dict.
|
||||
# In addition to the common page header fields, the result contains an 'entries'
|
||||
# array of dicts with the following fields, depending on the page type:
|
||||
# internal page [BTREE_INTERNAL]:
|
||||
# - 'page_num': referenced page number (used to find further pages to process)
|
||||
# leaf page [BTREE_LEAF]:
|
||||
# - 'record_type': record type, must be RECORD_KEYDATA or RECORD_OVERFLOW_DATA
|
||||
# - 'data': binary data (key or value payload), if record type is RECORD_KEYDATA
|
||||
# - 'page_num': referenced overflow page number, if record type is RECORD_OVERFLOW_DATA
|
||||
# overflow page [OVERFLOW_DATA]:
|
||||
# - 'data': binary data (part of key or value payload)
|
||||
def dump_page(data):
|
||||
page_info = {}
|
||||
page_header = data[0:26]
|
||||
_, pgno, prev_pgno, next_pgno, entries, hf_offset, level, pg_type = struct.unpack('QIIIHHBB', page_header)
|
||||
|
@ -56,20 +67,35 @@ def dump_leaf_page(data):
|
|||
page_info['entry_offsets'] = struct.unpack('{}H'.format(entries), data[26:26 + entries * 2])
|
||||
page_info['entries'] = []
|
||||
|
||||
if pg_type == BTREE_INTERNAL:
|
||||
# Skip internal pages. These are the internal nodes of the btree and don't contain anything relevant to us
|
||||
return None
|
||||
assert pg_type in (BTREE_INTERNAL, BTREE_LEAF, OVERFLOW_DATA)
|
||||
|
||||
assert pg_type == BTREE_LEAF, 'A non-btree leaf page has been encountered while dumping leaves'
|
||||
if pg_type == OVERFLOW_DATA:
|
||||
assert entries == 1
|
||||
page_info['entries'].append({'data': data[26:26 + hf_offset]})
|
||||
return page_info
|
||||
|
||||
for i in range(0, entries):
|
||||
entry = {}
|
||||
offset = page_info['entry_offsets'][i]
|
||||
entry = {'offset': offset}
|
||||
page_data_header = data[offset:offset + 3]
|
||||
e_len, pg_type = struct.unpack('HB', page_data_header)
|
||||
entry['len'] = e_len
|
||||
entry['pg_type'] = pg_type
|
||||
entry['data'] = data[offset + 3:offset + 3 + e_len]
|
||||
record_header = data[offset:offset + 3]
|
||||
offset += 3
|
||||
e_len, record_type = struct.unpack('HB', record_header)
|
||||
|
||||
if pg_type == BTREE_INTERNAL:
|
||||
assert record_type == RECORD_KEYDATA
|
||||
internal_record_data = data[offset:offset + 9]
|
||||
_, page_num, _ = struct.unpack('=BII', internal_record_data)
|
||||
entry['page_num'] = page_num
|
||||
elif pg_type == BTREE_LEAF:
|
||||
assert record_type in (RECORD_KEYDATA, RECORD_OVERFLOW_DATA)
|
||||
entry['record_type'] = record_type
|
||||
if record_type == RECORD_KEYDATA:
|
||||
entry['data'] = data[offset:offset + e_len]
|
||||
elif record_type == RECORD_OVERFLOW_DATA:
|
||||
overflow_record_data = data[offset:offset + 9]
|
||||
_, page_num, _ = struct.unpack('=BII', overflow_record_data)
|
||||
entry['page_num'] = page_num
|
||||
|
||||
page_info['entries'].append(entry)
|
||||
|
||||
return page_info
|
||||
|
@ -115,16 +141,27 @@ def dump_meta_page(page):
|
|||
return metadata
|
||||
|
||||
# Given the dict from dump_leaf_page, get the key-value pairs and put them into a dict
|
||||
def extract_kv_pairs(page_data):
|
||||
def extract_kv_pairs(page_data, pages):
|
||||
out = {}
|
||||
last_key = None
|
||||
for i, entry in enumerate(page_data['entries']):
|
||||
data = b''
|
||||
if entry['record_type'] == RECORD_KEYDATA:
|
||||
data = entry['data']
|
||||
elif entry['record_type'] == RECORD_OVERFLOW_DATA:
|
||||
next_page = entry['page_num']
|
||||
while next_page != 0:
|
||||
opage = pages[next_page]
|
||||
opage_info = dump_page(opage)
|
||||
data += opage_info['entries'][0]['data']
|
||||
next_page = opage_info['next_pgno']
|
||||
|
||||
# By virtue of these all being pairs, even number entries are keys, and odd are values
|
||||
if i % 2 == 0:
|
||||
out[entry['data']] = b''
|
||||
last_key = entry['data']
|
||||
last_key = data
|
||||
else:
|
||||
out[last_key] = entry['data']
|
||||
out[last_key] = data
|
||||
return out
|
||||
|
||||
# Extract the key-value pairs of the BDB file given in filename
|
||||
|
@ -132,20 +169,42 @@ def dump_bdb_kv(filename):
|
|||
# Read in the BDB file and start deserializing it
|
||||
pages = []
|
||||
with open(filename, 'rb') as f:
|
||||
data = f.read(PAGESIZE)
|
||||
# Determine pagesize first
|
||||
data = f.read(PAGE_HEADER_SIZE)
|
||||
pagesize = struct.unpack('I', data[20:24])[0]
|
||||
assert pagesize in (512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
|
||||
|
||||
# Read rest of first page
|
||||
data += f.read(pagesize - PAGE_HEADER_SIZE)
|
||||
assert len(data) == pagesize
|
||||
|
||||
# Read all remaining pages
|
||||
while len(data) > 0:
|
||||
pages.append(data)
|
||||
data = f.read(PAGESIZE)
|
||||
data = f.read(pagesize)
|
||||
|
||||
# Sanity check the meta pages
|
||||
dump_meta_page(pages[OUTER_META_PAGE])
|
||||
dump_meta_page(pages[INNER_META_PAGE])
|
||||
# Sanity check the meta pages, read root page
|
||||
outer_meta_info = dump_meta_page(pages[OUTER_META_PAGE])
|
||||
root_page_info = dump_page(pages[outer_meta_info['root']])
|
||||
assert root_page_info['pg_type'] == BTREE_LEAF
|
||||
assert len(root_page_info['entries']) == 2
|
||||
assert root_page_info['entries'][0]['data'] == SUBDATABASE_NAME
|
||||
assert len(root_page_info['entries'][1]['data']) == 4
|
||||
inner_meta_page = int.from_bytes(root_page_info['entries'][1]['data'], 'big')
|
||||
inner_meta_info = dump_meta_page(pages[inner_meta_page])
|
||||
|
||||
# Fetch the kv pairs from the leaf pages
|
||||
# Fetch the kv pairs from the pages
|
||||
kv = {}
|
||||
for i in range(3, len(pages)):
|
||||
info = dump_leaf_page(pages[i])
|
||||
if info is not None:
|
||||
info_kv = extract_kv_pairs(info)
|
||||
pages_to_process = [inner_meta_info['root']]
|
||||
while len(pages_to_process) > 0:
|
||||
curr_page_no = pages_to_process.pop()
|
||||
assert curr_page_no <= outer_meta_info['last_pgno']
|
||||
info = dump_page(pages[curr_page_no])
|
||||
assert info['pg_type'] in (BTREE_INTERNAL, BTREE_LEAF)
|
||||
if info['pg_type'] == BTREE_INTERNAL:
|
||||
for entry in info['entries']:
|
||||
pages_to_process.append(entry['page_num'])
|
||||
elif info['pg_type'] == BTREE_LEAF:
|
||||
info_kv = extract_kv_pairs(info, pages)
|
||||
kv = {**kv, **info_kv}
|
||||
return kv
|
||||
|
|
|
@ -6,18 +6,23 @@
|
|||
|
||||
import os
|
||||
import platform
|
||||
import random
|
||||
import stat
|
||||
import string
|
||||
import subprocess
|
||||
import textwrap
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
from test_framework.bdb import dump_bdb_kv
|
||||
from test_framework.messages import ser_string
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import (
|
||||
assert_equal,
|
||||
assert_greater_than,
|
||||
sha256sum_file,
|
||||
)
|
||||
from test_framework.wallet import getnewdestination
|
||||
|
||||
|
||||
class ToolWalletTest(BitcoinTestFramework):
|
||||
|
@ -545,6 +550,44 @@ class ToolWalletTest(BitcoinTestFramework):
|
|||
self.stop_node(0)
|
||||
self.assert_tool_output("The dumpfile may contain private keys. To ensure the safety of your Bitcoin, do not share the dumpfile.\n", "-wallet=unclean_lsn", f"-dumpfile={wallet_dump}", "dump")
|
||||
|
||||
def test_compare_legacy_dump_with_framework_bdb_parser(self):
|
||||
self.log.info("Verify that legacy wallet database dump matches the one from the test framework's BDB parser")
|
||||
wallet_name = "bdb_ro_test"
|
||||
self.start_node(0)
|
||||
# add some really large labels (above twice the largest valid page size) to create BDB overflow pages
|
||||
self.nodes[0].createwallet(wallet_name)
|
||||
wallet_rpc = self.nodes[0].get_wallet_rpc(wallet_name)
|
||||
generated_labels = {}
|
||||
for i in range(10):
|
||||
address = getnewdestination()[2]
|
||||
large_label = ''.join([random.choice(string.ascii_letters) for _ in range(150000)])
|
||||
wallet_rpc.setlabel(address, large_label)
|
||||
generated_labels[address] = large_label
|
||||
# fill the keypool to create BDB internal pages
|
||||
wallet_rpc.keypoolrefill(1000)
|
||||
self.stop_node(0)
|
||||
|
||||
wallet_dumpfile = self.nodes[0].datadir_path / "bdb_ro_test.dump"
|
||||
self.assert_tool_output("The dumpfile may contain private keys. To ensure the safety of your Bitcoin, do not share the dumpfile.\n", "-wallet={}".format(wallet_name), "-dumpfile={}".format(wallet_dumpfile), "dump")
|
||||
|
||||
expected_dump = self.read_dump(wallet_dumpfile)
|
||||
# remove extra entries from wallet tool dump that are not actual key/value pairs from the database
|
||||
del expected_dump['BITCOIN_CORE_WALLET_DUMP']
|
||||
del expected_dump['format']
|
||||
del expected_dump['checksum']
|
||||
bdb_ro_parser_dump_raw = dump_bdb_kv(self.nodes[0].wallets_path / wallet_name / "wallet.dat")
|
||||
bdb_ro_parser_dump = OrderedDict()
|
||||
assert any([len(bytes.fromhex(value)) >= 150000 for value in expected_dump.values()])
|
||||
for key, value in sorted(bdb_ro_parser_dump_raw.items()):
|
||||
bdb_ro_parser_dump[key.hex()] = value.hex()
|
||||
assert_equal(bdb_ro_parser_dump, expected_dump)
|
||||
|
||||
# check that all labels were created with the correct address
|
||||
for address, label in generated_labels.items():
|
||||
key_bytes = b'\x04name' + ser_string(address.encode())
|
||||
assert key_bytes in bdb_ro_parser_dump_raw
|
||||
assert_equal(bdb_ro_parser_dump_raw[key_bytes], ser_string(label.encode()))
|
||||
|
||||
def run_test(self):
|
||||
self.wallet_path = self.nodes[0].wallets_path / self.default_wallet_name / self.wallet_data_filename
|
||||
self.test_invalid_tool_commands_and_args()
|
||||
|
@ -561,6 +604,9 @@ class ToolWalletTest(BitcoinTestFramework):
|
|||
self.test_dump_createfromdump()
|
||||
self.test_chainless_conflicts()
|
||||
self.test_dump_very_large_records()
|
||||
if not self.options.descriptors and self.is_bdb_compiled() and not self.options.swap_bdb_endian:
|
||||
self.test_compare_legacy_dump_with_framework_bdb_parser()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
ToolWalletTest(__file__).main()
|
||||
|
|
|
@ -7,11 +7,11 @@ See feature_assumeutxo.py for background.
|
|||
|
||||
## Possible test improvements
|
||||
|
||||
- TODO: test import descriptors while background sync is in progress
|
||||
- TODO: test loading a wallet (backup) on a pruned node
|
||||
|
||||
"""
|
||||
from test_framework.address import address_to_scriptpubkey
|
||||
from test_framework.descriptors import descsum_create
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.messages import COIN
|
||||
from test_framework.util import (
|
||||
|
@ -20,6 +20,7 @@ from test_framework.util import (
|
|||
ensure_for,
|
||||
)
|
||||
from test_framework.wallet import MiniWallet
|
||||
from test_framework.wallet_util import get_generate_key
|
||||
|
||||
START_HEIGHT = 199
|
||||
SNAPSHOT_BASE_HEIGHT = 299
|
||||
|
@ -49,6 +50,13 @@ class AssumeutxoTest(BitcoinTestFramework):
|
|||
self.add_nodes(3)
|
||||
self.start_nodes(extra_args=self.extra_args)
|
||||
|
||||
def import_descriptor(self, node, wallet_name, key, timestamp):
|
||||
import_request = [{"desc": descsum_create("pkh(" + key.pubkey + ")"),
|
||||
"timestamp": timestamp,
|
||||
"label": "Descriptor import test"}]
|
||||
wrpc = node.get_wallet_rpc(wallet_name)
|
||||
return wrpc.importdescriptors(import_request)
|
||||
|
||||
def run_test(self):
|
||||
"""
|
||||
Bring up two (disconnected) nodes, mine some new blocks on the first,
|
||||
|
@ -157,6 +165,21 @@ class AssumeutxoTest(BitcoinTestFramework):
|
|||
self.log.info("Backup from before the snapshot height can't be loaded during background sync")
|
||||
assert_raises_rpc_error(-4, "Wallet loading failed. Error loading wallet. Wallet requires blocks to be downloaded, and software does not currently support loading wallets while blocks are being downloaded out of order when using assumeutxo snapshots. Wallet should be able to load successfully after node sync reaches height 299", n1.restorewallet, "w2", "backup_w2.dat")
|
||||
|
||||
self.log.info("Test loading descriptors during background sync")
|
||||
wallet_name = "w1"
|
||||
n1.createwallet(wallet_name, disable_private_keys=True)
|
||||
key = get_generate_key()
|
||||
time = n1.getblockchaininfo()['time']
|
||||
timestamp = 0
|
||||
expected_error_message = f"Rescan failed for descriptor with timestamp {timestamp}. There was an error reading a block from time {time}, which is after or within 7200 seconds of key creation, and could contain transactions pertaining to the desc. As a result, transactions and coins using this desc may not appear in the wallet. This error is likely caused by an in-progress assumeutxo background sync. Check logs or getchainstates RPC for assumeutxo background sync progress and try again later."
|
||||
result = self.import_descriptor(n1, wallet_name, key, timestamp)
|
||||
assert_equal(result[0]['error']['code'], -1)
|
||||
assert_equal(result[0]['error']['message'], expected_error_message)
|
||||
|
||||
self.log.info("Test that rescanning blocks from before the snapshot fails when blocks are not available from the background sync yet")
|
||||
w1 = n1.get_wallet_rpc(wallet_name)
|
||||
assert_raises_rpc_error(-1, "Failed to rescan unavailable blocks likely due to an in-progress assumeutxo background sync. Check logs or getchainstates RPC for assumeutxo background sync progress and try again later.", w1.rescanblockchain, 100)
|
||||
|
||||
PAUSE_HEIGHT = FINAL_HEIGHT - 40
|
||||
|
||||
self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT)
|
||||
|
@ -204,6 +227,11 @@ class AssumeutxoTest(BitcoinTestFramework):
|
|||
self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1)
|
||||
ensure_for(duration=1, f=lambda: (n2.getbalance() == 34))
|
||||
|
||||
self.log.info("Ensuring descriptors can be loaded after background sync")
|
||||
n1.loadwallet(wallet_name)
|
||||
result = self.import_descriptor(n1, wallet_name, key, timestamp)
|
||||
assert_equal(result[0]['success'], True)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
AssumeutxoTest(__file__).main()
|
||||
|
|
|
@ -5,17 +5,17 @@
|
|||
"""Test the sendmany RPC command."""
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import assert_raises_rpc_error
|
||||
|
||||
|
||||
class SendmanyTest(BitcoinTestFramework):
|
||||
# Setup and helpers
|
||||
def add_options(self, parser):
|
||||
self.add_wallet_options(parser)
|
||||
|
||||
|
||||
def skip_test_if_missing_module(self):
|
||||
self.skip_if_no_wallet()
|
||||
|
||||
|
||||
def set_test_params(self):
|
||||
self.num_nodes = 1
|
||||
self.setup_clean_chain = True
|
||||
|
@ -26,9 +26,19 @@ class SendmanyTest(BitcoinTestFramework):
|
|||
addr_3 = self.wallet.getnewaddress()
|
||||
|
||||
self.log.info("Test using duplicate address in SFFO argument")
|
||||
self.def_wallet.sendmany(dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[addr_1, addr_1, addr_1])
|
||||
assert_raises_rpc_error(-8, "Invalid parameter 'subtract fee from output', duplicated position: 0", self.def_wallet.sendmany, dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[addr_1, addr_1, addr_1])
|
||||
self.log.info("Test using address not present in tx.vout in SFFO argument")
|
||||
self.def_wallet.sendmany(dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[addr_3])
|
||||
assert_raises_rpc_error(-8, f"Invalid parameter 'subtract fee from output', destination {addr_3} not found in tx outputs", self.def_wallet.sendmany, dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[addr_3])
|
||||
self.log.info("Test using negative index in SFFO argument")
|
||||
assert_raises_rpc_error(-8, "Invalid parameter 'subtract fee from output', negative position: -5", self.def_wallet.sendmany, dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[-5])
|
||||
self.log.info("Test using an out of bounds index in SFFO argument")
|
||||
assert_raises_rpc_error(-8, "Invalid parameter 'subtract fee from output', position too large: 5", self.def_wallet.sendmany, dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[5])
|
||||
self.log.info("Test using an unexpected type in SFFO argument")
|
||||
assert_raises_rpc_error(-8, "Invalid parameter 'subtract fee from output', invalid value type: bool", self.def_wallet.sendmany, dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[False])
|
||||
self.log.info("Test duplicates in SFFO argument, mix string destinations with numeric indexes")
|
||||
assert_raises_rpc_error(-8, "Invalid parameter 'subtract fee from output', duplicated position: 0", self.def_wallet.sendmany, dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[0, addr_1])
|
||||
self.log.info("Test valid mixing of string destinations with numeric indexes in SFFO argument")
|
||||
self.def_wallet.sendmany(dummy='', amounts={addr_1: 1, addr_2: 1}, subtractfeefrom=[0, addr_2])
|
||||
|
||||
def run_test(self):
|
||||
self.nodes[0].createwallet("activewallet")
|
||||
|
|