Mirror of https://github.com/bitcoin/bitcoin.git
qa: split run_test into smaller parts
Let's not have run_test grow into a giant function as we add more tests.

Signed-off-by: Antoine Poinsot <darosior@protonmail.com>
Parent: 06c5ce9714
Commit: 053415b297
1 changed file (test/functional/feature_fee_estimation.py) with 40 additions and 28 deletions
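For orientation before the hunks: the commit extracts the stages of the old run_test into named helper methods and turns run_test into a short orchestrator. Below is a minimal sketch of the resulting shape, with method and attribute names taken from the diff that follows and bodies elided; the import is the functional-test framework's usual one.

from test_framework.test_framework import BitcoinTestFramework


class EstimateFeeTest(BitcoinTestFramework):
    def initial_split(self, node):
        """Split two coinbase UTxOs into many small coins"""
        ...  # splitting loops moved out of the old run_test (hunks 1 and 2)

    def sanity_check_estimates_range(self):
        """Populate estimation buckets, assert estimates are in a sane range and
        are strictly increasing as the target decreases."""
        ...  # previously inline in run_test

    def test_feerate_mempoolminfee(self):
        ...  # restart node 1 with a high -minrelaytxfee, then re-check estimates

    def run_test(self):
        self.start_node(0)
        self.initial_split(self.nodes[0])
        # start and connect nodes 1 and 2, sync, then run the checks in order
        self.sanity_check_estimates_range()
        self.test_feerate_mempoolminfee()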
@@ -212,20 +212,16 @@ class EstimateFeeTest(BitcoinTestFramework):
                     newmem.append(utx)
             self.memutxo = newmem
 
-    def run_test(self):
-        self.log.info("This test is time consuming, please be patient")
-        self.log.info("Splitting inputs so we can generate tx's")
-
-        # Start node0
-        self.start_node(0)
+    def initial_split(self, node):
+        """Split two coinbase UTxOs into many small coins"""
         self.txouts = []
         self.txouts2 = []
         # Split a coinbase into two transaction puzzle outputs
-        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
+        split_inputs(node, node.listunspent(0), self.txouts, True)
 
         # Mine
-        while len(self.nodes[0].getrawmempool()) > 0:
-            self.generate(self.nodes[0], 1)
+        while len(node.getrawmempool()) > 0:
+            self.generate(node, 1)
 
         # Repeatedly split those 2 outputs, doubling twice for each rep
         # Use txouts to monitor the available utxo, since these won't be tracked in wallet
@@ -233,27 +229,19 @@ class EstimateFeeTest(BitcoinTestFramework):
         while reps < 5:
             # Double txouts to txouts2
             while len(self.txouts) > 0:
-                split_inputs(self.nodes[0], self.txouts, self.txouts2)
-            while len(self.nodes[0].getrawmempool()) > 0:
-                self.generate(self.nodes[0], 1)
+                split_inputs(node, self.txouts, self.txouts2)
+            while len(node.getrawmempool()) > 0:
+                self.generate(node, 1)
             # Double txouts2 to txouts
             while len(self.txouts2) > 0:
-                split_inputs(self.nodes[0], self.txouts2, self.txouts)
-            while len(self.nodes[0].getrawmempool()) > 0:
-                self.generate(self.nodes[0], 1)
+                split_inputs(node, self.txouts2, self.txouts)
+            while len(node.getrawmempool()) > 0:
+                self.generate(node, 1)
             reps += 1
-        self.log.info("Finished splitting")
-
-        # Now we can connect the other nodes, didn't want to connect them earlier
-        # so the estimates would not be affected by the splitting transactions
-        self.start_node(1)
-        self.start_node(2)
-        self.connect_nodes(1, 0)
-        self.connect_nodes(0, 2)
-        self.connect_nodes(2, 1)
-
-        self.sync_all()
 
+    def sanity_check_estimates_range(self):
+        """Populate estimation buckets, assert estimates are in a sane range and
+        are strictly increasing as the target decreases."""
         self.fees_per_kb = []
         self.memutxo = []
         self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
@@ -279,12 +267,36 @@ class EstimateFeeTest(BitcoinTestFramework):
         self.log.info("Final estimates after emptying mempools")
         check_estimates(self.nodes[1], self.fees_per_kb)
 
-        # check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
-        self.log.info("Test fee rate estimation after restarting node with high MempoolMinFee")
+    def test_feerate_mempoolminfee(self):
         high_val = 3*self.nodes[1].estimatesmartfee(1)['feerate']
         self.restart_node(1, extra_args=[f'-minrelaytxfee={high_val}'])
         check_estimates(self.nodes[1], self.fees_per_kb)
 
+    def run_test(self):
+        self.log.info("This test is time consuming, please be patient")
+        self.log.info("Splitting inputs so we can generate tx's")
+
+        # Split two coinbases into many small utxos
+        self.start_node(0)
+        self.initial_split(self.nodes[0])
+        self.log.info("Finished splitting")
+
+        # Now we can connect the other nodes, didn't want to connect them earlier
+        # so the estimates would not be affected by the splitting transactions
+        self.start_node(1)
+        self.start_node(2)
+        self.connect_nodes(1, 0)
+        self.connect_nodes(0, 2)
+        self.connect_nodes(2, 1)
+        self.sync_all()
+
+        self.log.info("Testing estimates with single transactions.")
+        self.sanity_check_estimates_range()
+
+        # check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
+        self.log.info("Test fee rate estimation after restarting node with high MempoolMinFee")
+        self.test_feerate_mempoolminfee()
+
         self.log.info("Testing that fee estimation is disabled in blocksonly.")
         self.restart_node(0, ["-blocksonly"])
         assert_raises_rpc_error(-32603, "Fee estimation disabled",
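A note on the design: the extracted helpers still share state through the test instance (initial_split fills self.txouts, sanity_check_estimates_range sets self.confutxo and self.fees_per_kb, and test_feerate_mempoolminfee reads self.fees_per_kb), so run_test must keep calling them in this order; the split buys readability and room for growth rather than independence between stages. Presumably a future check would then be one more method plus one more call in run_test, which is the growth pattern the commit message is aiming for.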