mirror of https://github.com/bitcoin/bitcoin.git
test: move sync_blocks and sync_mempool functions to test_framework.py
parent 9ad6f14175
commit cc84460c16
5 changed files with 54 additions and 64 deletions
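In practice, the change means functional tests call the sync helpers through BitcoinTestFramework instead of importing module-level functions from test_framework/util.py. A minimal sketch of the new calling convention (the ExampleSyncTest class below is hypothetical, for illustration only):

from test_framework.test_framework import BitcoinTestFramework


class ExampleSyncTest(BitcoinTestFramework):
    """Hypothetical test illustrating the framework-level sync helpers."""

    def set_test_params(self):
        self.num_nodes = 2

    def run_test(self):
        # Previously: sync_blocks(self.nodes) / sync_mempools(self.nodes),
        # imported from test_framework.util (removed by this commit).
        self.nodes[0].generate(1)
        self.sync_blocks()    # poll until every node reports the same best block hash
        self.sync_mempools()  # poll until every node has the same mempool contents


if __name__ == '__main__':
    ExampleSyncTest().main()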
@@ -28,8 +28,6 @@ from test_framework.descriptors import descsum_create
 from test_framework.util import (
     adjust_bitcoin_conf_for_pre_17,
     assert_equal,
-    sync_blocks,
-    sync_mempools,
 )

@@ -68,7 +66,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
     def run_test(self):
         self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())

-        sync_blocks(self.nodes)
+        self.sync_blocks()

         # Sanity check the test framework:
         res = self.nodes[self.num_nodes - 1].getblockchaininfo()

@@ -93,17 +91,17 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
         # Create a confirmed transaction, receiving coins
         address = wallet.getnewaddress()
         self.nodes[0].sendtoaddress(address, 10)
-        sync_mempools(self.nodes)
+        self.sync_mempools()
         self.nodes[0].generate(1)
-        sync_blocks(self.nodes)
+        self.sync_blocks()
         # Create a conflicting transaction using RBF
         return_address = self.nodes[0].getnewaddress()
         tx1_id = self.nodes[1].sendtoaddress(return_address, 1)
         tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"]
         # Confirm the transaction
-        sync_mempools(self.nodes)
+        self.sync_mempools()
         self.nodes[0].generate(1)
-        sync_blocks(self.nodes)
+        self.sync_blocks()
         # Create another conflicting transaction using RBF
         tx3_id = self.nodes[1].sendtoaddress(return_address, 1)
         tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"]

@@ -7,7 +7,7 @@
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
     assert_equal, assert_is_hex_string, assert_raises_rpc_error,
-    connect_nodes, disconnect_nodes, sync_blocks
+    connect_nodes, disconnect_nodes
 )

 FILTER_TYPES = ["basic"]

@@ -30,7 +30,7 @@ class GetBlockFilterTest(BitcoinTestFramework):

         # Reorg node 0 to a new chain
         connect_nodes(self.nodes[0], 1)
-        sync_blocks(self.nodes)
+        self.sync_blocks()

         assert_equal(self.nodes[0].getblockcount(), 4)
         chain1_hashes = [self.nodes[0].getblockhash(block_height) for block_height in range(4)]

@@ -31,8 +31,6 @@ from .util import (
     disconnect_nodes,
     get_datadir_path,
     initialize_datadir,
-    sync_blocks,
-    sync_mempools,
 )

@@ -541,15 +539,54 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
         connect_nodes(self.nodes[1], 2)
         self.sync_all()

-    def sync_blocks(self, nodes=None, **kwargs):
-        sync_blocks(nodes or self.nodes, **kwargs)
+    def sync_blocks(self, nodes=None, wait=1, timeout=60):
+        """
+        Wait until everybody has the same tip.
+        sync_blocks needs to be called with an rpc_connections set that has least
+        one node already synced to the latest, stable tip, otherwise there's a
+        chance it might return before all nodes are stably synced.
+        """
+        rpc_connections = nodes or self.nodes
+        timeout = int(timeout * self.options.timeout_factor)
+        stop_time = time.time() + timeout
+        while time.time() <= stop_time:
+            best_hash = [x.getbestblockhash() for x in rpc_connections]
+            if best_hash.count(best_hash[0]) == len(rpc_connections):
+                return
+            # Check that each peer has at least one connection
+            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
+            time.sleep(wait)
+        raise AssertionError("Block sync timed out after {}s:{}".format(
+            timeout,
+            "".join("\n {!r}".format(b) for b in best_hash),
+        ))

-    def sync_mempools(self, nodes=None, **kwargs):
-        sync_mempools(nodes or self.nodes, **kwargs)
+    def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
+        """
+        Wait until everybody has the same transactions in their memory
+        pools
+        """
+        rpc_connections = nodes or self.nodes
+        timeout = int(timeout * self.options.timeout_factor)
+        stop_time = time.time() + timeout
+        while time.time() <= stop_time:
+            pool = [set(r.getrawmempool()) for r in rpc_connections]
+            if pool.count(pool[0]) == len(rpc_connections):
+                if flush_scheduler:
+                    for r in rpc_connections:
+                        r.syncwithvalidationinterfacequeue()
+                return
+            # Check that each peer has at least one connection
+            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
+            time.sleep(wait)
+        raise AssertionError("Mempool sync timed out after {}s:{}".format(
+            timeout,
+            "".join("\n {!r}".format(m) for m in pool),
+        ))

-    def sync_all(self, nodes=None, **kwargs):
-        self.sync_blocks(nodes, **kwargs)
-        self.sync_mempools(nodes, **kwargs)
+    def sync_all(self, nodes=None):
+        self.sync_blocks(nodes)
+        self.sync_mempools(nodes)

     # Private helper methods. These should not be accessed by the subclass test scripts.

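The new methods expose wait, timeout and flush_scheduler as explicit keyword arguments (and scale the timeout by the framework's timeout_factor option), so individual calls can be tuned. A hypothetical call site, assuming only the signatures added above:

        # Give a slow mempool sync extra time and skip the scheduler flush.
        self.sync_mempools(timeout=120, flush_scheduler=False)
        # Only wait for the first two nodes to agree on a tip, polling every 2 seconds.
        self.sync_blocks(nodes=self.nodes[0:2], wait=2)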
@@ -420,50 +420,6 @@ def connect_nodes(from_connection, node_num):
     wait_until(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))


-def sync_blocks(rpc_connections, *, wait=1, timeout=60):
-    """
-    Wait until everybody has the same tip.
-
-    sync_blocks needs to be called with an rpc_connections set that has least
-    one node already synced to the latest, stable tip, otherwise there's a
-    chance it might return before all nodes are stably synced.
-    """
-    stop_time = time.time() + timeout
-    while time.time() <= stop_time:
-        best_hash = [x.getbestblockhash() for x in rpc_connections]
-        if best_hash.count(best_hash[0]) == len(rpc_connections):
-            return
-        # Check that each peer has at least one connection
-        assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
-        time.sleep(wait)
-    raise AssertionError("Block sync timed out after {}s:{}".format(
-        timeout,
-        "".join("\n {!r}".format(b) for b in best_hash),
-    ))
-
-
-def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
-    """
-    Wait until everybody has the same transactions in their memory
-    pools
-    """
-    stop_time = time.time() + timeout
-    while time.time() <= stop_time:
-        pool = [set(r.getrawmempool()) for r in rpc_connections]
-        if pool.count(pool[0]) == len(rpc_connections):
-            if flush_scheduler:
-                for r in rpc_connections:
-                    r.syncwithvalidationinterfacequeue()
-            return
-        # Check that each peer has at least one connection
-        assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
-        time.sleep(wait)
-    raise AssertionError("Mempool sync timed out after {}s:{}".format(
-        timeout,
-        "".join("\n {!r}".format(m) for m in pool),
-    ))
-
-
 # Transaction/Block functions
 #############################

@@ -12,7 +12,6 @@ from test_framework.util import (
     assert_equal,
     assert_raises_rpc_error,
     connect_nodes,
-    sync_blocks,
 )

@@ -264,7 +263,7 @@ class WalletTest(BitcoinTestFramework):
         # Now confirm tx_orig
         self.restart_node(1, ['-persistmempool=0'])
         connect_nodes(self.nodes[0], 1)
-        sync_blocks(self.nodes)
+        self.sync_blocks()
         self.nodes[1].sendrawtransaction(tx_orig)
         self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
         self.sync_all()