29029df5c7 [doc] v3 signaling in mempool-replacements.md (glozow)
e643ea795e [fuzz] v3 transactions and sigop-adjusted vsize (glozow)
1fd16b5c62 [functional test] v3 transaction submission (glozow)
27c8786ba9 test framework: Add and use option for tx-version in MiniWallet methods (MarcoFalke)
9a1fea55b2 [policy/validation] allow v3 transactions with certain restrictions (glozow)
eb8d5a2e7d [policy] add v3 policy rules (glozow)
9a29d470fb [rpc] return full string for package_msg and package-error (glozow)
158623b8e0 [refactor] change Workspace::m_conflicts and adjacent funcs/structs to use Txid (glozow)

Pull request description:

  See #27463 for overall package relay tracking.

  Delving Bitcoin discussion thread: https://delvingbitcoin.org/t/v3-transaction-policy-for-anti-pinning/340
  Delving Bitcoin discussion for LN usage: https://delvingbitcoin.org/t/lightning-transactions-with-v3-and-ephemeral-anchors/418

  Rationale:
  - There are various pinning problems with RBF and our general ancestor/descendant limits. These policies help mitigate many pinning attacks and make package RBF feasible (see #28984, which implements package RBF on top of this). I would focus the most here on Rule 3 pinning. [1][2]
  - Switching to a cluster-based mempool (see #27677 and #28676) requires the removal of CPFP carve out, which applications depend on. V3 + package RBF + ephemeral anchors + 1-parent-1-child package relay provides an intermediate solution.

  V3 policy is for "Priority Transactions." [3][4] It allows users to opt in to more restrictive topological limits for shared transactions, in exchange for the more robust fee-bumping abilities that this offers. Even though we don't have cluster limits, we are able to treat these transactions as having a maximum cluster size of 2.

  Immediate benefits:
  - You can presign a transaction with 0 fees (not just 1sat/vB!) and add a fee-bump later.
  - Rule 3 pinning is reduced by a significant amount, since the attacker can only attach a maximum of 1000vB to your shared transaction.

  This also enables some other cool things (again see #27463 for the overall roadmap):
  - Ephemeral Anchors
  - Package RBF for these 1-parent-1-child packages. That means e.g. a commitment tx + child can replace another commitment tx using the child's fees.
  - We can transition to a "single anchor" universe without worrying about package limit pinning. So current users of CPFP carve out would have something else to use.
  - We can switch to a cluster-based mempool [5] (#27677, #28676), which removes CPFP carve out [6].

  [1]: Original mailing list post and discussion about RBF pinning problems: https://gist.github.com/glozow/25d9662c52453bd08b4b4b1d3783b9ff, https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-January/019817.html
  [2]: A FAQ is "we need this for cluster mempool, but is this still necessary afterwards?" There are some pinning issues that are fixed here and not fully fixed in cluster mempool, so we will still want this or something similar afterward.
  [3]: Mailing list post for v3: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2022-September/020937.html
  [4]: Original PR #25038 also contains a lot of the discussion.
  [5]: https://delvingbitcoin.org/t/an-overview-of-the-cluster-mempool-proposal/393/7
  [6]: https://delvingbitcoin.org/t/an-overview-of-the-cluster-mempool-proposal/393#the-cpfp-carveout-rule-can-no-longer-be-supported-12

ACKs for top commit:
  sdaftuar: ACK 29029df5c7
  achow101: ACK 29029df5c7
  instagibbs: ACK 29029df5c7 modulo that

Tree-SHA512: 9664b078890cfdca2a146439f8835c9d9ab483f43b30af8c7cd6962f09aa557fb1ce7689d5e130a2ec142235dbc8f21213881baa75241c5881660f9008d68450
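For orientation, a minimal hypothetical sketch of the 1-parent-1-child v3 topology described above, written against the functional-test framework's MiniWallet (using the tx-version option the MarcoFalke commit adds). The `wallet`/`node` names, the `version` keyword, and the fee values are illustrative assumptions, not code from the PR:

```python
from decimal import Decimal

# Parent opts in to v3 semantics via nVersion=3; it can be presigned with 0 fee.
parent = wallet.create_self_transfer(version=3, fee_rate=0)

# The fee bump comes from a single child, which must itself be v3 and is
# capped at 1000 vB, bounding how much an attacker can pin on top of it.
child = wallet.create_self_transfer(
    utxo_to_spend=parent["new_utxo"],
    version=3,
    fee_rate=Decimal("0.0005"),
)

# Relayed and evaluated together as a 1-parent-1-child package.
node.submitpackage([parent["hex"], child["hex"]])
```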
199 lines · 9.3 KiB · Python · Executable file
#!/usr/bin/env python3
# Copyright (c) 2023 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test sigop limit mempool policy (`-bytespersigop` parameter)"""
from decimal import Decimal
from math import ceil

from test_framework.messages import (
    COutPoint,
    CTransaction,
    CTxIn,
    CTxInWitness,
    CTxOut,
    WITNESS_SCALE_FACTOR,
    tx_from_hex,
)
from test_framework.script import (
    CScript,
    OP_CHECKMULTISIG,
    OP_CHECKSIG,
    OP_ENDIF,
    OP_FALSE,
    OP_IF,
    OP_RETURN,
    OP_TRUE,
)
from test_framework.script_util import (
    keys_to_multisig_script,
    script_to_p2wsh_script,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    assert_greater_than,
    assert_greater_than_or_equal,
)
from test_framework.wallet import MiniWallet
from test_framework.wallet_util import generate_keypair

DEFAULT_BYTES_PER_SIGOP = 20  # default setting
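
# Illustrative helper (not part of the original test): the sigop-adjusted
# vsize rule exercised below mirrors GetVirtualTransactionSize() in Core's
# policy code, billing a transaction for whichever is larger, its weight or
# its sigop cost scaled by -bytespersigop, ceil-divided by the scale factor.
def sigop_adjusted_vsize(weight, sigop_cost, bytes_per_sigop):
    return (max(weight, sigop_cost * bytes_per_sigop) + WITNESS_SCALE_FACTOR - 1) // WITNESS_SCALE_FACTOR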


class BytesPerSigOpTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        # allow large datacarrier output to pad transactions
        self.extra_args = [['-datacarriersize=100000']]

    def create_p2wsh_spending_tx(self, witness_script, output_script):
        """Create a 1-input-1-output P2WSH spending transaction with only the
        witness script in the witness stack and the given output script."""
        # create P2WSH address and fund it via MiniWallet first
        fund = self.wallet.send_to(
            from_node=self.nodes[0],
            scriptPubKey=script_to_p2wsh_script(witness_script),
            amount=1000000,
        )

        # create spending transaction
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(fund["txid"], 16), fund["sent_vout"]))]
        tx.wit.vtxinwit = [CTxInWitness()]
        tx.wit.vtxinwit[0].scriptWitness.stack = [bytes(witness_script)]
        tx.vout = [CTxOut(500000, output_script)]
        return tx

    def test_sigops_limit(self, bytes_per_sigop, num_sigops):
        sigop_equivalent_vsize = ceil(num_sigops * bytes_per_sigop / WITNESS_SCALE_FACTOR)
        self.log.info(f"- {num_sigops} sigops (equivalent size of {sigop_equivalent_vsize} vbytes)")

        # create a template tx with the specified sigop cost in the witness script
        # (note that the sigops are counted even though they sit in a branch
        # that is never executed)
        num_multisigops = num_sigops // 20
        num_singlesigops = num_sigops % 20
        witness_script = CScript(
            [OP_FALSE, OP_IF] +
            [OP_CHECKMULTISIG]*num_multisigops +
            [OP_CHECKSIG]*num_singlesigops +
            [OP_ENDIF, OP_TRUE]
        )
        # use a 256-byte data-push as lower bound in the output script, in order
        # to avoid having to compensate for tx size changes caused by varying
        # length serialization sizes (both for scriptPubKey and data-push lengths)
        tx = self.create_p2wsh_spending_tx(witness_script, CScript([OP_RETURN, b'X'*256]))

        # bump the tx to reach the sigop-limit equivalent size by padding the datacarrier output
        assert_greater_than_or_equal(sigop_equivalent_vsize, tx.get_vsize())
        vsize_to_pad = sigop_equivalent_vsize - tx.get_vsize()
        tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'X'*(256+vsize_to_pad)])
        assert_equal(sigop_equivalent_vsize, tx.get_vsize())

        res = self.nodes[0].testmempoolaccept([tx.serialize().hex()])[0]
        assert_equal(res['allowed'], True)
        assert_equal(res['vsize'], sigop_equivalent_vsize)
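        # note: testmempoolaccept's 'vsize' field reports the sigop-adjusted
        # value, i.e. max(serialized vsize, sigop-equivalent vsize); the next
        # two checks probe this maximum from either side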

        # increase the tx's vsize to be right above the sigop-limit equivalent size
        # => tx's vsize in mempool should also grow accordingly
        tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'X'*(256+vsize_to_pad+1)])
        res = self.nodes[0].testmempoolaccept([tx.serialize().hex()])[0]
        assert_equal(res['allowed'], True)
        assert_equal(res['vsize'], sigop_equivalent_vsize+1)

        # decrease the tx's vsize to be right below the sigop-limit equivalent size
        # => tx's vsize in mempool should stick at the sigop-limit equivalent
        # bytes level, as it is higher than the tx's serialized vsize
        # (the maximum of both is taken)
        tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'X'*(256+vsize_to_pad-1)])
        res = self.nodes[0].testmempoolaccept([tx.serialize().hex()])[0]
        assert_equal(res['allowed'], True)
        assert_equal(res['vsize'], sigop_equivalent_vsize)

        # check that the ancestor and descendant size calculations in the mempool
        # also use the same max(sigop_equivalent_vsize, serialized_vsize) logic
        # (to keep it simple, we only test the case here where the sigop vsize
        # is much larger than the serialized vsize, i.e. we create a small child
        # tx by getting rid of the large padding output)
        tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'test123'])
        assert_greater_than(sigop_equivalent_vsize, tx.get_vsize())
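        # sendrawtransaction refuses to burn funds by default: maxburnamount
        # must cover the 0.005 BTC locked in the unspendable OP_RETURN output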
        self.nodes[0].sendrawtransaction(hexstring=tx.serialize().hex(), maxburnamount='1.0')

        # fetch parent tx, which doesn't contain any sigops
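        # (prevout.hash is stored as an integer parsed from the txid hex, so
        # re-serializing it as 32 big-endian bytes recovers the txid string
        # expected by the RPC)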
        parent_txid = tx.vin[0].prevout.hash.to_bytes(32, 'big').hex()
        parent_tx = tx_from_hex(self.nodes[0].getrawtransaction(txid=parent_txid))

        entry_child = self.nodes[0].getmempoolentry(tx.rehash())
        assert_equal(entry_child['descendantcount'], 1)
        assert_equal(entry_child['descendantsize'], sigop_equivalent_vsize)
        assert_equal(entry_child['ancestorcount'], 2)
        assert_equal(entry_child['ancestorsize'], sigop_equivalent_vsize + parent_tx.get_vsize())

        entry_parent = self.nodes[0].getmempoolentry(parent_tx.rehash())
        assert_equal(entry_parent['ancestorcount'], 1)
        assert_equal(entry_parent['ancestorsize'], parent_tx.get_vsize())
        assert_equal(entry_parent['descendantcount'], 2)
        assert_equal(entry_parent['descendantsize'], parent_tx.get_vsize() + sigop_equivalent_vsize)

    def test_sigops_package(self):
        self.log.info("Test that an overly large sigops-vbyte count hits package limits")
        # Make a 2-transaction package which fails vbyte checks even though
        # each transaction separately would pass.
        self.restart_node(0, extra_args=["-bytespersigop=5000", "-permitbaremultisig=1"] + self.extra_args[0])

        def create_bare_multisig_tx(utxo_to_spend=None):
            _, pubkey = generate_keypair()
            amount_for_bare = 50000
            tx_dict = self.wallet.create_self_transfer(fee=Decimal("3"), utxo_to_spend=utxo_to_spend)
            tx_utxo = tx_dict["new_utxo"]
            tx = tx_dict["tx"]
            tx.vout.append(CTxOut(amount_for_bare, keys_to_multisig_script([pubkey], k=1)))
            tx.vout[0].nValue -= amount_for_bare
            tx_utxo["txid"] = tx.rehash()
            tx_utxo["value"] -= Decimal("0.00050000")  # 50000 sat moved to the bare multisig output
            return (tx_utxo, tx)

        tx_parent_utxo, tx_parent = create_bare_multisig_tx()
        tx_child_utxo, tx_child = create_bare_multisig_tx(tx_parent_utxo)

        # Separately, the parent tx is ok
        parent_individual_testres = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex()])[0]
        assert parent_individual_testres["allowed"]
        # Multisig is counted as MAX_PUBKEYS_PER_MULTISIG = 20 sigops
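        # (legacy counting bills every bare CHECKMULTISIG at the worst case of
        # 20 sigops, even for this 1-of-1, and legacy sigops are scaled by
        # WITNESS_SCALE_FACTOR in the sigop cost, cancelling the final division
        # by 4: vsize = 20 * bytespersigop = 100000 vbytes)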
        assert_equal(parent_individual_testres["vsize"], 5000 * 20)

        # But together, the package exceeds the limits in the *package* context. If the
        # sigop-adjusted vsize weren't checked here, validation would get further and
        # return a too-long-mempool-chain error instead.
        packet_test = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex(), tx_child.serialize().hex()])
        expected_package_error = f"package-mempool-limits, package size {2*20*5000} exceeds ancestor size limit [limit: 101000]"
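        # 2 * 20 * 5000 = 200000 sigop-adjusted vbytes, well above the default
        # 101 kvB ancestor size limit applied during package evaluation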
        assert_equal([x["package-error"] for x in packet_test], [expected_package_error] * 2)

        # When we actually try to submit, the parent makes it into the mempool, but the
        # child would exceed ancestor vsize limits
        res = self.nodes[0].submitpackage([tx_parent.serialize().hex(), tx_child.serialize().hex()])
        assert "too-long-mempool-chain" in res["tx-results"][tx_child.getwtxid()]["error"]
        assert tx_parent.rehash() in self.nodes[0].getrawmempool()

        # Transactions are tiny in weight
        assert_greater_than(2000, tx_parent.get_weight() + tx_child.get_weight())
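        # (under 2000 weight units combined is at most 500 serialized vbytes,
        # yet each tx is billed 100000 vbytes: the limits hit above are driven
        # entirely by the sigop-adjusted size)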

    def run_test(self):
        self.wallet = MiniWallet(self.nodes[0])

        for bytes_per_sigop in (DEFAULT_BYTES_PER_SIGOP, 43, 81, 165, 327, 649, 1072):
            if bytes_per_sigop == DEFAULT_BYTES_PER_SIGOP:
                self.log.info(f"Test default sigops limit setting ({bytes_per_sigop} bytes per sigop)...")
            else:
                bytespersigop_parameter = f"-bytespersigop={bytes_per_sigop}"
                self.log.info(f"Test sigops limit setting {bytespersigop_parameter}...")
                self.restart_node(0, extra_args=[bytespersigop_parameter] + self.extra_args[0])

            for num_sigops in (69, 101, 142, 183, 222):
                self.test_sigops_limit(bytes_per_sigop, num_sigops)

            self.generate(self.wallet, 1)

        self.test_sigops_package()


if __name__ == '__main__':
    BytesPerSigOpTest().main()