bitcoin-bitcoin-core/src/node/miner.cpp
Sjors Provoost c504b6997b
refactor: add coinbase constraints to BlockCreateOptions
When generating a block template through e.g. getblocktemplate RPC,
we reserve 4000 weight units and 400 sigops. Pools use this space
for their coinbase outputs.

At least one pool patched their Bitcoin Core node to adjust
these hardcoded values. They eventually produced an invalid
block which exceeded the sigops limit.
https://bitcoin.stackexchange.com/questions/117837/how-many-sigops-are-in-the-invalid-block-783426

The existence of such patches suggests it may be useful to
make these values configurable. This commit would make such a
change easier.

The main motivation however is that the Stratum v2 spec
requires the pool to communicate the maximum bytes they intend
to add to the coinbase outputs. A proposed change to the spec
would also require them to communicate the maximum number of sigops.

This commit also documents what happens when
-blockmaxweight is lower than the coinbase
reserved value.

Co-authored-by: Ryan Ofsky <ryan@ofsky.org>
2024-07-17 18:33:15 +02:00
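
To illustrate (not part of this commit), a minimal caller-side sketch of how the new fields might be set, assuming a chainstate, mempool and coinbase script obtained elsewhere from an initialized node; the 4000/400 figures simply echo the reserved defaults mentioned above:

// Hypothetical usage sketch; chainstate, mempool and script_pub_key are assumptions,
// not shown here. Field and type names are taken from the code below.
node::BlockAssembler::Options options;
options.nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
options.coinbase_max_additional_weight = 4000;        // weight reserved for the pool's coinbase outputs
options.coinbase_output_max_additional_sigops = 400;  // sigops reserved for those outputs
node::BlockAssembler assembler{chainstate, &mempool, options};
std::unique_ptr<node::CBlockTemplate> tmpl{assembler.CreateNewBlock(script_pub_key)};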


// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <node/miner.h>
#include <chain.h>
#include <chainparams.h>
#include <coins.h>
#include <common/args.h>
#include <consensus/amount.h>
#include <consensus/consensus.h>
#include <consensus/merkle.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <deploymentstatus.h>
#include <logging.h>
#include <policy/feerate.h>
#include <policy/policy.h>
#include <pow.h>
#include <primitives/transaction.h>
#include <util/moneystr.h>
#include <util/time.h>
#include <validation.h>
#include <algorithm>
#include <utility>
namespace node {
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
int64_t nOldTime = pblock->nTime;
int64_t nNewTime{std::max<int64_t>(pindexPrev->GetMedianTimePast() + 1, TicksSinceEpoch<std::chrono::seconds>(NodeClock::now()))};
if (nOldTime < nNewTime) {
pblock->nTime = nNewTime;
}
// Updating time can change work required on testnet:
if (consensusParams.fPowAllowMinDifficultyBlocks) {
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, consensusParams);
}
return nNewTime - nOldTime;
}
void RegenerateCommitments(CBlock& block, ChainstateManager& chainman)
{
CMutableTransaction tx{*block.vtx.at(0)};
tx.vout.erase(tx.vout.begin() + GetWitnessCommitmentIndex(block));
block.vtx.at(0) = MakeTransactionRef(tx);
const CBlockIndex* prev_block = WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock));
chainman.GenerateCoinbaseCommitment(block, prev_block);
block.hashMerkleRoot = BlockMerkleRoot(block);
}
static BlockAssembler::Options ClampOptions(BlockAssembler::Options options)
{
Assert(options.coinbase_max_additional_weight <= DEFAULT_BLOCK_MAX_WEIGHT);
Assert(options.coinbase_output_max_additional_sigops <= MAX_BLOCK_SIGOPS_COST);
// Limit weight to between coinbase_max_additional_weight and DEFAULT_BLOCK_MAX_WEIGHT for sanity:
// Coinbase (reserved) outputs can safely exceed -blockmaxweight, but the rest of the block template will be empty.
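// Illustration: with -blockmaxweight=2000 and a 4000-weight coinbase reservation, nBlockMaxWeight
// is raised to 4000, so the template has no room left for mempool transactions.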
options.nBlockMaxWeight = std::clamp<size_t>(options.nBlockMaxWeight, options.coinbase_max_additional_weight, DEFAULT_BLOCK_MAX_WEIGHT);
return options;
}
BlockAssembler::BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool, const Options& options)
: chainparams{chainstate.m_chainman.GetParams()},
m_mempool{options.use_mempool ? mempool : nullptr},
m_chainstate{chainstate},
m_options{ClampOptions(options)}
{
}
void ApplyArgsManOptions(const ArgsManager& args, BlockAssembler::Options& options)
{
// Block resource limits
options.nBlockMaxWeight = args.GetIntArg("-blockmaxweight", options.nBlockMaxWeight);
if (const auto blockmintxfee{args.GetArg("-blockmintxfee")}) {
if (const auto parsed{ParseMoney(*blockmintxfee)}) options.blockMinFeeRate = CFeeRate{*parsed};
}
options.print_modified_fee = args.GetBoolArg("-printpriority", options.print_modified_fee);
}
void BlockAssembler::resetBlock()
{
inBlock.clear();
// Reserve space for coinbase tx
nBlockWeight = m_options.coinbase_max_additional_weight;
nBlockSigOpsCost = m_options.coinbase_output_max_additional_sigops;
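// Starting the counters at the reserved values effectively subtracts the coinbase
// allowance from the weight and sigops budgets later checked in TestPackage().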
// These counters do not include coinbase tx
nBlockTx = 0;
nFees = 0;
}
std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn)
{
const auto time_start{SteadyClock::now()};
resetBlock();
pblocktemplate.reset(new CBlockTemplate());
if (!pblocktemplate.get()) {
return nullptr;
}
CBlock* const pblock = &pblocktemplate->block; // pointer for convenience
// Add dummy coinbase tx as first transaction
pblock->vtx.emplace_back();
pblocktemplate->vTxFees.push_back(-1); // updated at end
pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end
LOCK(::cs_main);
CBlockIndex* pindexPrev = m_chainstate.m_chain.Tip();
assert(pindexPrev != nullptr);
nHeight = pindexPrev->nHeight + 1;
pblock->nVersion = m_chainstate.m_chainman.m_versionbitscache.ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
// -regtest only: allow overriding block.nVersion with
// -blockversion=N to test forking scenarios
if (chainparams.MineBlocksOnDemand()) {
pblock->nVersion = gArgs.GetIntArg("-blockversion", pblock->nVersion);
}
pblock->nTime = TicksSinceEpoch<std::chrono::seconds>(NodeClock::now());
m_lock_time_cutoff = pindexPrev->GetMedianTimePast();
int nPackagesSelected = 0;
int nDescendantsUpdated = 0;
if (m_mempool) {
LOCK(m_mempool->cs);
addPackageTxs(*m_mempool, nPackagesSelected, nDescendantsUpdated);
}
const auto time_1{SteadyClock::now()};
m_last_block_num_txs = nBlockTx;
m_last_block_weight = nBlockWeight;
// Create coinbase transaction.
CMutableTransaction coinbaseTx;
coinbaseTx.vin.resize(1);
coinbaseTx.vin[0].prevout.SetNull();
coinbaseTx.vout.resize(1);
coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;
coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
pblock->vtx[0] = MakeTransactionRef(std::move(coinbaseTx));
pblocktemplate->vchCoinbaseCommitment = m_chainstate.m_chainman.GenerateCoinbaseCommitment(*pblock, pindexPrev);
pblocktemplate->vTxFees[0] = -nFees;
LogPrintf("CreateNewBlock(): block weight: %u txs: %u fees: %ld sigops %d\n", GetBlockWeight(*pblock), nBlockTx, nFees, nBlockSigOpsCost);
// Fill in header
pblock->hashPrevBlock = pindexPrev->GetBlockHash();
UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
pblock->nNonce = 0;
pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]);
BlockValidationState state;
if (m_options.test_block_validity && !TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev,
/*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) {
throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString()));
}
const auto time_2{SteadyClock::now()};
LogPrint(BCLog::BENCH, "CreateNewBlock() packages: %.2fms (%d packages, %d updated descendants), validity: %.2fms (total %.2fms)\n",
Ticks<MillisecondsDouble>(time_1 - time_start), nPackagesSelected, nDescendantsUpdated,
Ticks<MillisecondsDouble>(time_2 - time_1),
Ticks<MillisecondsDouble>(time_2 - time_start));
return std::move(pblocktemplate);
}
void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
{
for (CTxMemPool::setEntries::iterator iit = testSet.begin(); iit != testSet.end(); ) {
// Only test txs not already in the block
if (inBlock.count((*iit)->GetSharedTx()->GetHash())) {
testSet.erase(iit++);
} else {
iit++;
}
}
}
bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost) const
{
// TODO: switch to weight-based accounting for packages instead of vsize-based accounting.
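// packageSize is a virtual size (vsize); since weight <= WITNESS_SCALE_FACTOR * vsize,
// this check conservatively over-counts the package's weight.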
if (nBlockWeight + WITNESS_SCALE_FACTOR * packageSize >= m_options.nBlockMaxWeight) {
return false;
}
if (nBlockSigOpsCost + packageSigOpsCost >= MAX_BLOCK_SIGOPS_COST) {
return false;
}
return true;
}
// Perform transaction-level checks before adding to block:
// - transaction finality (locktime)
bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package) const
{
for (CTxMemPool::txiter it : package) {
if (!IsFinalTx(it->GetTx(), nHeight, m_lock_time_cutoff)) {
return false;
}
}
return true;
}
void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
{
pblocktemplate->block.vtx.emplace_back(iter->GetSharedTx());
pblocktemplate->vTxFees.push_back(iter->GetFee());
pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost());
nBlockWeight += iter->GetTxWeight();
++nBlockTx;
nBlockSigOpsCost += iter->GetSigOpCost();
nFees += iter->GetFee();
inBlock.insert(iter->GetSharedTx()->GetHash());
if (m_options.print_modified_fee) {
LogPrintf("fee rate %s txid %s\n",
CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
iter->GetTx().GetHash().ToString());
}
}
/** Add descendants of given transactions to mapModifiedTx with ancestor
* state updated assuming given transactions are inBlock. Returns number
* of updated descendants. */
static int UpdatePackagesForAdded(const CTxMemPool& mempool,
const CTxMemPool::setEntries& alreadyAdded,
indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs)
{
AssertLockHeld(mempool.cs);
int nDescendantsUpdated = 0;
for (CTxMemPool::txiter it : alreadyAdded) {
CTxMemPool::setEntries descendants;
mempool.CalculateDescendants(it, descendants);
// Insert all descendants (not yet in block) into the modified set
for (CTxMemPool::txiter desc : descendants) {
if (alreadyAdded.count(desc)) {
continue;
}
++nDescendantsUpdated;
modtxiter mit = mapModifiedTx.find(desc);
if (mit == mapModifiedTx.end()) {
CTxMemPoolModifiedEntry modEntry(desc);
mit = mapModifiedTx.insert(modEntry).first;
}
mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
}
}
return nDescendantsUpdated;
}
void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries)
{
// Sort package by ancestor count
// If a transaction A depends on transaction B, then A's ancestor count
// must be greater than B's. So this is sufficient to validly order the
// transactions for block inclusion.
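// Example: if child C spends an output of unconfirmed parent P, P's ancestor count is
// smaller than C's, so P sorts (and is added to the block) before C.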
sortedEntries.clear();
sortedEntries.insert(sortedEntries.begin(), package.begin(), package.end());
std::sort(sortedEntries.begin(), sortedEntries.end(), CompareTxIterByAncestorCount());
}
// This transaction selection algorithm orders the mempool based
// on feerate of a transaction including all unconfirmed ancestors.
// Since we don't remove transactions from the mempool as we select them
// for block inclusion, we need an alternate method of updating the feerate
// of a transaction with its not-yet-selected ancestors as we go.
// This is accomplished by walking the in-mempool descendants of selected
// transactions and storing a temporary modified state in mapModifiedTxs.
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
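// Illustration: once parent P is added to the block, its child C gets a mapModifiedTx
// entry whose ancestor size/fees/sigops no longer include P, so C is re-scored on the
// feerate of just its not-yet-selected ancestors.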
void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated)
{
AssertLockHeld(mempool.cs);
// mapModifiedTx will store sorted packages after they are modified
// because some of their txs are already in the block
indexed_modified_transaction_set mapModifiedTx;
// Keep track of entries that failed inclusion, to avoid duplicate work
std::set<Txid> failedTx;
CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
CTxMemPool::txiter iter;
// Limit the number of attempts to add transactions to the block when it is
// close to full; this is just a simple heuristic to finish quickly if the
// mempool has a lot of entries.
const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
int64_t nConsecutiveFailed = 0;
while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) {
// First try to find a new transaction in mapTx to evaluate.
//
// Skip entries in mapTx that are already in a block or are present
// in mapModifiedTx (which implies that the mapTx ancestor state is
// stale due to ancestor inclusion in the block)
// Also skip transactions that we've already failed to add. This can happen if
// we consider a transaction in mapModifiedTx and it fails: we can then
// potentially consider it again while walking mapTx. It's currently
// guaranteed to fail again, but as a belt-and-suspenders check we put it in
// failedTx and avoid re-evaluation, since the re-evaluation would be using
// cached size/sigops/fee values that are not actually correct.
/** Return true if given transaction from mapTx has already been evaluated,
* or if the transaction's cached data in mapTx is incorrect. */
if (mi != mempool.mapTx.get<ancestor_score>().end()) {
auto it = mempool.mapTx.project<0>(mi);
assert(it != mempool.mapTx.end());
if (mapModifiedTx.count(it) || inBlock.count(it->GetSharedTx()->GetHash()) || failedTx.count(it->GetSharedTx()->GetHash())) {
++mi;
continue;
}
}
// Now that mi is not stale, determine which transaction to evaluate:
// the next entry from mapTx, or the best from mapModifiedTx?
bool fUsingModified = false;
modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
if (mi == mempool.mapTx.get<ancestor_score>().end()) {
// We're out of entries in mapTx; use the entry from mapModifiedTx
iter = modit->iter;
fUsingModified = true;
} else {
// Try to compare the mapTx entry to the mapModifiedTx entry
iter = mempool.mapTx.project<0>(mi);
if (modit != mapModifiedTx.get<ancestor_score>().end() &&
CompareTxMemPoolEntryByAncestorFee()(*modit, CTxMemPoolModifiedEntry(iter))) {
// The best entry in mapModifiedTx has higher score
// than the one from mapTx.
// Switch which transaction (package) to consider
iter = modit->iter;
fUsingModified = true;
} else {
// Either no entry in mapModifiedTx, or it's worse than mapTx.
// Increment mi for the next loop iteration.
++mi;
}
}
// We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
// contain anything that is inBlock.
assert(!inBlock.count(iter->GetSharedTx()->GetHash()));
uint64_t packageSize = iter->GetSizeWithAncestors();
CAmount packageFees = iter->GetModFeesWithAncestors();
int64_t packageSigOpsCost = iter->GetSigOpCostWithAncestors();
if (fUsingModified) {
packageSize = modit->nSizeWithAncestors;
packageFees = modit->nModFeesWithAncestors;
packageSigOpsCost = modit->nSigOpCostWithAncestors;
}
if (packageFees < m_options.blockMinFeeRate.GetFee(packageSize)) {
// Everything else we might consider has a lower fee rate
return;
}
if (!TestPackage(packageSize, packageSigOpsCost)) {
if (fUsingModified) {
// Since we always look at the best entry in mapModifiedTx,
// we must erase failed entries so that we can consider the
// next best entry on the next loop iteration
mapModifiedTx.get<ancestor_score>().erase(modit);
failedTx.insert(iter->GetSharedTx()->GetHash());
}
++nConsecutiveFailed;
if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight >
m_options.nBlockMaxWeight - m_options.coinbase_max_additional_weight) {
// Give up if we're close to full and haven't succeeded in a while
break;
}
continue;
}
auto ancestors{mempool.AssumeCalculateMemPoolAncestors(__func__, *iter, CTxMemPool::Limits::NoLimits(), /*fSearchForParents=*/false)};
onlyUnconfirmed(ancestors);
ancestors.insert(iter);
// Test if all tx's are Final
if (!TestPackageTransactions(ancestors)) {
if (fUsingModified) {
mapModifiedTx.get<ancestor_score>().erase(modit);
failedTx.insert(iter->GetSharedTx()->GetHash());
}
continue;
}
// This transaction will make it in; reset the failed counter.
nConsecutiveFailed = 0;
// Package can be added. Sort the entries in a valid order.
std::vector<CTxMemPool::txiter> sortedEntries;
SortForBlock(ancestors, sortedEntries);
for (size_t i = 0; i < sortedEntries.size(); ++i) {
AddToBlock(sortedEntries[i]);
// Erase from the modified set, if present
mapModifiedTx.erase(sortedEntries[i]);
}
++nPackagesSelected;
// Update transactions that depend on each of these
nDescendantsUpdated += UpdatePackagesForAdded(mempool, ancestors, mapModifiedTx);
}
}
} // namespace node