mirror of
https://github.com/bitcoin/bitcoin.git
synced 2025-02-02 09:46:52 -05:00
Merge #18726: test: check misbehavior more independently in p2p_filter.py
cd543d9193
test: check misbehavior more independently in p2p_filter.py (Danny Lee) Pull request description: This expands on #18672 in two ways: - Check positive cases (`filterload` accepted, `filteradd` accepted) in addition to the negative cases added in #18672 - Address MarcoFalke's [suggestion](https://github.com/bitcoin/bitcoin/pull/18672#discussion_r412101752) to successfully load a filter before testing `filteradd` ACKs for top commit: theStack: re-ACK cd543d9193
Tree-SHA512: f82402f6287ccddf08b38b6432d5e2b2b2ef528802a981d04c24bac459022f732d9090d4849d72d3d1eb2c757161dcb18c4c036b6e11dc80114e9cd49f21c3bd
This commit is contained in:
commit
36c0abd8f6
1 changed file with 26 additions and 5 deletions
|
@ -64,19 +64,40 @@ class FilterTest(BitcoinTestFramework):
|
|||
def skip_test_if_missing_module(self):
    """Skip the test if the node under test was compiled without wallet support."""
    self.skip_if_no_wallet()
|
||||
|
||||
def run_test(self):
|
||||
filter_node = self.nodes[0].add_p2p_connection(FilterNode())
|
||||
|
||||
def test_size_limits(self, filter_node):
    """Exercise the BIP 37 message size limits on node 0.

    Checks, via debug-log inspection, that:
    - a filterload whose data exceeds MAX_BLOOM_FILTER_SIZE, or whose
      nHashFuncs exceeds MAX_BLOOM_HASH_FUNCS, is scored as 'Misbehaving';
    - a filterload at exactly those maxima is accepted silently;
    - a filteradd data element at MAX_SCRIPT_ELEMENT_SIZE is accepted,
      while one byte more is scored as 'Misbehaving'.

    filter_node: a FilterNode P2P connection already attached to self.nodes[0].
    """
    self.log.info('Check that too large filter is rejected')
    with self.nodes[0].assert_debug_log(['Misbehaving']):
        filter_node.send_and_ping(msg_filterload(data=b'\xbb' * (MAX_BLOOM_FILTER_SIZE + 1)))

    self.log.info('Check that max size filter is accepted')
    with self.nodes[0].assert_debug_log([], unexpected_msgs=['Misbehaving']):
        filter_node.send_and_ping(msg_filterload(data=b'\xbb' * (MAX_BLOOM_FILTER_SIZE)))
    filter_node.send_and_ping(msg_filterclear())

    self.log.info('Check that filter with too many hash functions is rejected')
    with self.nodes[0].assert_debug_log(['Misbehaving']):
        filter_node.send_and_ping(msg_filterload(data=b'\xaa', nHashFuncs=MAX_BLOOM_HASH_FUNCS + 1))

    self.log.info('Check that filter with max hash functions is accepted')
    with self.nodes[0].assert_debug_log([], unexpected_msgs=['Misbehaving']):
        filter_node.send_and_ping(msg_filterload(data=b'\xaa', nHashFuncs=MAX_BLOOM_HASH_FUNCS))
    # Don't send filterclear until next two filteradd checks are done

    self.log.info('Check that max size data element to add to the filter is accepted')
    with self.nodes[0].assert_debug_log([], unexpected_msgs=['Misbehaving']):
        filter_node.send_and_ping(msg_filteradd(data=b'\xcc' * (MAX_SCRIPT_ELEMENT_SIZE)))

    self.log.info('Check that too large data element to add to the filter is rejected')
    with self.nodes[0].assert_debug_log(['Misbehaving']):
        filter_node.send_and_ping(msg_filteradd(data=b'\xcc' * (MAX_SCRIPT_ELEMENT_SIZE + 1)))

    filter_node.send_and_ping(msg_filterclear())
|
||||
|
||||
def run_test(self):
|
||||
filter_node = self.nodes[0].add_p2p_connection(FilterNode())
|
||||
|
||||
self.test_size_limits(filter_node)
|
||||
|
||||
self.log.info('Add filtered P2P connection to the node')
|
||||
filter_node.send_and_ping(filter_node.watch_filter_init)
|
||||
filter_address = self.nodes[0].decodescript(filter_node.watch_script_pubkey)['addresses'][0]
|
||||
|
|
Loading…
Add table
Reference in a new issue