test: fix v2 transport intermittent test failure (#29002)
Relying only on the number of peers to detect a new connection suffers from race conditions, as unrelated previous peers could disconnect at any time in between. Use the more robust approach of watching for an increased highest peer id instead (again via the `getpeerinfo` RPC call), through a newly introduced context manager method `TestNode.wait_for_new_peer()`. Fixes #29009.
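For illustration, a minimal self-contained sketch of both strategies; `node` is a hypothetical handle exposing the `getpeerinfo` RPC, and `wait_until` stands in for the framework's polling helper (none of these names are part of this commit):

    import time

    def wait_until(predicate, timeout=10):
        # Poll until the predicate holds; stands in for the framework helper.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return
            time.sleep(0.05)
        raise AssertionError("predicate not satisfied within timeout")

    def connect_fragile(node, s, port):
        # Count-based detection: if an unrelated peer disconnects between
        # connect() and the poll, the count may never reach num_peers + 1.
        num_peers = len(node.getpeerinfo())
        s.connect(("127.0.0.1", port))
        wait_until(lambda: len(node.getpeerinfo()) == num_peers + 1)

    def connect_robust(node, s, port):
        # Id-based detection: peer ids are assigned from a monotonically
        # increasing counter, so a higher highest id always means a new peer,
        # regardless of concurrent disconnects of older peers.
        def highest_peer_id():
            peers = node.getpeerinfo()
            return peers[-1]["id"] if peers else -1
        initial_id = highest_peer_id()
        s.connect(("127.0.0.1", port))
        wait_until(lambda: highest_peer_id() > initial_id)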
This commit is contained in:
parent 6d5790956f
commit 00e0658e77
2 changed files with 26 additions and 8 deletions
test/functional/p2p_v2_transport.py
@@ -133,9 +133,8 @@ class V2TransportTest(BitcoinTestFramework):
         V1_PREFIX = MAGIC_BYTES["regtest"] + b"version\x00\x00\x00\x00\x00"
         assert_equal(len(V1_PREFIX), 16)
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            num_peers = len(self.nodes[0].getpeerinfo())
-            s.connect(("127.0.0.1", p2p_port(0)))
-            self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers + 1)
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             s.sendall(V1_PREFIX[:-1])
             assert_equal(self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"], "detecting")
             s.sendall(bytes([V1_PREFIX[-1]]))  # send out last prefix byte
@@ -144,22 +143,23 @@ class V2TransportTest(BitcoinTestFramework):
         # Check wrong network prefix detection (hits if the next 12 bytes correspond to a v1 version message)
         wrong_network_magic_prefix = MAGIC_BYTES["signet"] + V1_PREFIX[4:]
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            s.connect(("127.0.0.1", p2p_port(0)))
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             with self.nodes[0].assert_debug_log(["V2 transport error: V1 peer with wrong MessageStart"]):
                 s.sendall(wrong_network_magic_prefix + b"somepayload")
 
         # Check detection of missing garbage terminator (hits after fixed amount of data if terminator never matches garbage)
         MAX_KEY_GARB_AND_GARBTERM_LEN = 64 + 4095 + 16
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            num_peers = len(self.nodes[0].getpeerinfo())
-            s.connect(("127.0.0.1", p2p_port(0)))
-            self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers + 1)
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             s.sendall(b'\x00' * (MAX_KEY_GARB_AND_GARBTERM_LEN - 1))
             self.wait_until(lambda: self.nodes[0].getpeerinfo()[-1]["bytesrecv"] == MAX_KEY_GARB_AND_GARBTERM_LEN - 1)
             with self.nodes[0].assert_debug_log(["V2 transport error: missing garbage terminator"]):
+                peer_id = self.nodes[0].getpeerinfo()[-1]["id"]
                 s.sendall(b'\x00')  # send out last byte
                 # should disconnect immediately
-                self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers)
+                self.wait_until(lambda: not peer_id in [p["id"] for p in self.nodes[0].getpeerinfo()])
 
 
 if __name__ == '__main__':
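The size bound in the hunk above follows BIP324's handshake layout: a 64-byte ElligatorSwift-encoded public key, at most 4095 bytes of garbage, and a 16-byte garbage terminator, so a peer that has sent that many bytes without a matching terminator can never complete the handshake. A small illustrative check (constant names are hypothetical; the values are BIP324's):

    ELLSWIFT_PUBKEY_LEN = 64      # ElligatorSwift-encoded ephemeral key
    MAX_GARBAGE_LEN = 4095        # maximum garbage before the terminator
    GARBAGE_TERMINATOR_LEN = 16   # garbage terminator length

    # Matches MAX_KEY_GARB_AND_GARBTERM_LEN = 64 + 4095 + 16 in the test.
    assert ELLSWIFT_PUBKEY_LEN + MAX_GARBAGE_LEN + GARBAGE_TERMINATOR_LEN == 4175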
test/functional/test_framework/test_node.py
@@ -519,6 +519,24 @@ class TestNode():
             'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
                 str(expected_msgs), print_log))
 
+    @contextlib.contextmanager
+    def wait_for_new_peer(self, timeout=5):
+        """
+        Wait until the node is connected to at least one new peer. We detect this
+        by watching for an increased highest peer id, using the `getpeerinfo` RPC call.
+        Note that the simpler approach of only accounting for the number of peers
+        suffers from race conditions, as disconnects from unrelated previous peers
+        could happen anytime in-between.
+        """
+        def get_highest_peer_id():
+            peer_info = self.getpeerinfo()
+            return peer_info[-1]["id"] if peer_info else -1
+
+        initial_peer_id = get_highest_peer_id()
+        yield
+        wait_until_helper_internal(lambda: get_highest_peer_id() > initial_peer_id,
+                                   timeout=timeout, timeout_factor=self.timeout_factor)
+
     @contextlib.contextmanager
     def profile_with_perf(self, profile_name: str):
         """
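With the helper in place, the connect-and-wait dance reduces to a single `with` block. A hedged usage sketch, assuming the usual functional-test setup where `self.nodes[0]` is a running node:

    import socket
    from test_framework.util import p2p_port

    # Inside a BitcoinTestFramework test's run_test():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # On exit, wait_for_new_peer() polls getpeerinfo until the highest
        # peer id exceeds the value sampled on entry.
        with self.nodes[0].wait_for_new_peer(timeout=10):
            s.connect(("127.0.0.1", p2p_port(0)))
        # getpeerinfo()[-1] now refers to this fresh connection (absent even
        # newer peers), so its fields can be asserted on safely.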