Mirror of https://github.com/bitcoin/bitcoin.git, synced 2025-02-02 09:46:52 -05:00.
Merge #14522: tests: add invalid P2P message tests
d20a9fa13d tests: add tests for invalid P2P messages (James O'Beirne)
62f94d39f8 tests: add P2PConnection.send_raw_message (James O'Beirne)
5aa31f6ef2 tests: add utility to assert node memory usage hasn't increased (James O'Beirne)

Pull request description:

- Adds `p2p_invalid_messages.py`: tests exercising node behavior when handling invalid and malformed P2P messages, including a test verifying that we can't DoS a node by spamming it with large invalid messages.
- Adds `TestNode.assert_memory_usage_stable`: a context manager that allows us to ensure memory usage doesn't significantly increase on a node during some test.
- Adds `P2PConnection.send_raw_message`: allows us to construct and send messages with tweaked headers.

Tree-SHA512: 720a4894c1e6d8f1551b2ae710e5b06c9e4f281524623957cb01599be9afea82671dc26d6152281de0acb87720f0c53b61e2b27d40434d30e525dd9e31fa671f
Commit 024816d6cf. 4 changed files with 230 additions and 6 deletions.
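For orientation before the file-by-file diff: the "tweaked headers" mentioned in the description refer to the standard 24-byte P2P message framing (4-byte network magic, 12-byte null-padded command name, 4-byte payload length, 4-byte payload checksum) that precedes every payload. Below is a minimal sketch of that layout, assuming the usual double-SHA256 checksum and the regtest network magic; `example_header` is an illustrative helper only, while the framework's `build_message` (touched by this PR) does the real work.

# Illustration only; not part of the diff.
import hashlib
import struct

def example_header(magic, command, payload):
    """Build a 24-byte P2P header: magic | command (12 bytes) | length | checksum."""
    assert len(command) <= 12  # command names longer than 12 bytes are invalid (see msg_nametoolong below)
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return magic + command.ljust(12, b'\x00') + struct.pack("<I", len(payload)) + checksum

header = example_header(b'\xfa\xbf\xb5\xda', b'badmsg', b'payload')  # regtest magic bytes
assert len(header) == 24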
test/functional/p2p_invalid_messages.py (new executable file, 175 lines added)
@@ -0,0 +1,175 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import struct

from test_framework import messages
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework


class msg_unrecognized:
    """Nonsensical message. Modeled after similar types in test_framework.messages."""

    command = b'badmsg'

    def __init__(self, str_data):
        self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data

    def serialize(self):
        return messages.ser_string(self.str_data)

    def __repr__(self):
        return "{}(data={})".format(self.command, self.str_data)


class msg_nametoolong(msg_unrecognized):

    command = b'thisnameiswayyyyyyyyytoolong'


class InvalidMessagesTest(BitcoinTestFramework):

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        """
        0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see
           that it isn't an effective DoS against the node.

        1. Send an oversized (4MB+) message and check that we're disconnected.

        2. Send a few messages with an incorrect data size in the header, ensure the
           messages are ignored.

        3. Send an unrecognized message with a command name longer than 12 characters.
        """
        node = self.nodes[0]
        self.node = node
        node.add_p2p_connection(P2PDataStore())
        conn2 = node.add_p2p_connection(P2PDataStore())

        msg_limit = 4 * 1000 * 1000  # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH
        valid_data_limit = msg_limit - 5  # Account for the 4-byte length prefix

        #
        # 0.
        #
        # Send as large a message as is valid, ensure we aren't disconnected but
        # also can't exhaust resources.
        #
        msg_at_size = msg_unrecognized("b" * valid_data_limit)
        assert len(msg_at_size.serialize()) == msg_limit

        with node.assert_memory_usage_stable(perc_increase_allowed=0.03):
            self.log.info(
                "Sending a bunch of large, junk messages to test "
                "memory exhaustion. May take a bit...")

            # Run a bunch of times to test for memory exhaustion.
            for _ in range(200):
                node.p2p.send_message(msg_at_size)

            # Check that, even though the node is being hammered by nonsense from one
            # connection, it can still service other peers in a timely way.
            for _ in range(20):
                conn2.sync_with_ping(timeout=2)

            # Peer 1, despite serving up a bunch of nonsense, should still be connected.
            self.log.info("Waiting for node to drop junk messages.")
            node.p2p.sync_with_ping(timeout=8)
            assert node.p2p.is_connected

        #
        # 1.
        #
        # Send an oversized message, ensure we're disconnected.
        #
        msg_over_size = msg_unrecognized("b" * (valid_data_limit + 1))
        assert len(msg_over_size.serialize()) == (msg_limit + 1)

        with node.assert_debug_log(["Oversized message from peer=0, disconnecting"]):
            # An unknown message type (or *any* message type) over
            # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
            node.p2p.send_message(msg_over_size)
            node.p2p.wait_for_disconnect(timeout=4)

        node.disconnect_p2ps()
        conn = node.add_p2p_connection(P2PDataStore())
        conn.wait_for_verack()

        #
        # 2.
        #
        # Send messages with an incorrect data size in the header.
        #
        actual_size = 100
        msg = msg_unrecognized("b" * actual_size)

        # TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
        for wrong_size in (2, 77, 78, 79):
            self.log.info("Sending a message with incorrect size of {}".format(wrong_size))

            # Unmodified message should submit okay.
            node.p2p.send_and_ping(msg)

            # A message lying about its data size results in a disconnect when the incorrect
            # data size is less than the actual size.
            #
            # TODO: why does behavior change at 78 bytes?
            #
            node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size))

            # For some reason unknown to me, we sometimes have to push additional data to the
            # peer in order for it to realize a disconnect.
            try:
                node.p2p.send_message(messages.msg_ping(nonce=123123))
            except IOError:
                pass

            node.p2p.wait_for_disconnect(timeout=10)
            node.disconnect_p2ps()
            node.add_p2p_connection(P2PDataStore())

        #
        # 3.
        #
        # Send a message with a too-long command name.
        #
        node.p2p.send_message(msg_nametoolong("foobar"))
        node.p2p.wait_for_disconnect(timeout=4)

        # Node is still up.
        conn = node.add_p2p_connection(P2PDataStore())
        conn.sync_with_ping()

    def _tweak_msg_data_size(self, message, wrong_size):
        """
        Return a raw message based on another message but with an incorrect data size in
        the message header.
        """
        raw_msg = self.node.p2p.build_message(message)

        bad_size_bytes = struct.pack("<I", wrong_size)
        num_header_bytes_before_size = 4 + 12

        # Replace the correct data size in the message with an incorrect one.
        raw_msg_with_wrong_size = (
            raw_msg[:num_header_bytes_before_size] +
            bad_size_bytes +
            raw_msg[(num_header_bytes_before_size + len(bad_size_bytes)):]
        )
        assert len(raw_msg) == len(raw_msg_with_wrong_size)

        return raw_msg_with_wrong_size


if __name__ == '__main__':
    InvalidMessagesTest().main()
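A note on the arithmetic in the test above: `valid_data_limit` subtracts 5 rather than 4 because `messages.ser_string()` prepends a compact-size prefix, and for payload lengths in the 4MB range that prefix occupies 5 bytes (a 0xfe marker byte plus a 4-byte little-endian length). That is what makes `len(msg_at_size.serialize()) == msg_limit` hold exactly. A quick sanity check, assuming the framework's `ser_compact_size` helper:

# Illustration only; not part of the diff.
from test_framework.messages import ser_compact_size

msg_limit = 4 * 1000 * 1000
# Lengths of 0x10000 and above encode as 0xfe plus a 4-byte length, i.e. 5 bytes of prefix.
assert len(ser_compact_size(msg_limit - 5)) == 5
assert len(ser_compact_size(msg_limit - 5)) + (msg_limit - 5) == msg_limit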
@@ -207,10 +207,13 @@ class P2PConnection(asyncio.Protocol):
 
         This method takes a P2P payload, builds the P2P header and adds
         the message to the send buffer to be sent over the socket."""
+        tmsg = self.build_message(message)
+        self._log_message("send", message)
+        return self.send_raw_message(tmsg)
+
+    def send_raw_message(self, raw_message_bytes):
         if not self.is_connected:
             raise IOError('Not connected')
-        self._log_message("send", message)
-        tmsg = self._build_message(message)
 
         def maybe_write():
             if not self._transport:
@@ -220,12 +223,12 @@ class P2PConnection(asyncio.Protocol):
             # Python 3.4 versions.
             if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
                 return
-            self._transport.write(tmsg)
+            self._transport.write(raw_message_bytes)
         NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
 
     # Class utility methods
 
-    def _build_message(self, message):
+    def build_message(self, message):
         """Build a serialized P2P message"""
         command = message.command
         data = message.serialize()
@@ -409,9 +412,9 @@ class P2PInterface(P2PConnection):
 
     # Message sending helper functions
 
-    def send_and_ping(self, message):
+    def send_and_ping(self, message, timeout=60):
         self.send_message(message)
-        self.sync_with_ping()
+        self.sync_with_ping(timeout=timeout)
 
     # Sync up with the node
     def sync_with_ping(self, timeout=60):
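The hunks above (in `test_framework.mininode`, per the test's import) make `build_message` public and add `send_raw_message`, so a test can serialize a message normally and then corrupt any header field before it hits the wire; `_tweak_msg_data_size` uses exactly this pattern on the length field. A sketch of the same idea applied to the checksum field (illustrative only; this particular corruption is not part of the PR):

# Illustration only; assumes we are inside a test's run_test() with node = self.nodes[0]
# and a P2P connection already attached.
from test_framework.messages import msg_ping

raw = node.p2p.build_message(msg_ping(nonce=1))  # 24-byte header followed by the payload
bad = raw[:20] + bytes(4) + raw[24:]             # zero out the checksum (header bytes 20-23)
node.p2p.send_raw_message(bad)                   # sent as-is; the header is not rebuilt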
@@ -115,6 +115,28 @@ class TestNode():
         ]
         return PRIV_KEYS[self.index]
 
+    def get_mem_rss(self):
+        """Get the memory usage (RSS) per `ps`.
+
+        If process is stopped or `ps` is unavailable, return None.
+        """
+        if not (self.running and self.process):
+            self.log.warning("Couldn't get memory usage; process isn't running.")
+            return None
+
+        try:
+            return int(subprocess.check_output(
+                "ps h -o rss {}".format(self.process.pid),
+                shell=True, stderr=subprocess.DEVNULL).strip())
+
+        # Catching `Exception` broadly to avoid failing on platforms where ps
+        # isn't installed or doesn't work as expected, e.g. OpenBSD.
+        #
+        # We could later use something like `psutils` to work across platforms.
+        except Exception:
+            self.log.exception("Unable to get memory usage")
+            return None
+
     def _node_msg(self, msg: str) -> str:
         """Return a modified msg that identifies this node by its index as a debugging aid."""
         return "[node %d] %s" % (self.index, msg)
@@ -271,6 +293,29 @@ class TestNode():
             if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                 self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
 
+    @contextlib.contextmanager
+    def assert_memory_usage_stable(self, perc_increase_allowed=0.03):
+        """Context manager that allows the user to assert that a node's memory usage (RSS)
+        hasn't increased beyond some threshold percentage.
+        """
+        before_memory_usage = self.get_mem_rss()
+
+        yield
+
+        after_memory_usage = self.get_mem_rss()
+
+        if not (before_memory_usage and after_memory_usage):
+            self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
+            return
+
+        perc_increase_memory_usage = 1 - (float(before_memory_usage) / after_memory_usage)
+
+        if perc_increase_memory_usage > perc_increase_allowed:
+            self._raise_assertion_error(
+                "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
+                    perc_increase_allowed * 100, before_memory_usage, after_memory_usage,
+                    perc_increase_memory_usage * 100))
+
     def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
         """Attempt to start the node and expect it to raise an error.
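In use, `assert_memory_usage_stable` simply brackets whichever part of a test is expected to be memory-neutral, as `p2p_invalid_messages.py` does around its message flood. A minimal sketch (`some_large_message` is a hypothetical stand-in):

# Illustration only; assumes we are inside a test's run_test().
node = self.nodes[0]

with node.assert_memory_usage_stable(perc_increase_allowed=0.03):
    for _ in range(200):
        node.p2p.send_message(some_large_message)  # hypothetical message object
# On exit of the with-block, RSS growth beyond 3% fails the test (when `ps` is available).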
@@ -136,6 +136,7 @@ BASE_SCRIPTS = [
     'mining_prioritisetransaction.py',
     'p2p_invalid_locator.py',
     'p2p_invalid_block.py',
+    'p2p_invalid_messages.py',
     'p2p_invalid_tx.py',
     'feature_assumevalid.py',
     'example_test.py',
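With the entry above in BASE_SCRIPTS, the new test runs as part of the functional suite via test/functional/test_runner.py; it should also be runnable on its own, e.g. `test/functional/test_runner.py p2p_invalid_messages.py`, or by executing the script directly since it is an executable file.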