bitcoin-bitcoin-core/src/node/miner.cpp

Commit 40e871d9b4 by glozow, 2022-02-23 10:55:05 +00:00:
[miner] always assume we can create witness blocks

    Given the low possibility of a reorg reverting the segwit soft fork,
    there is no need to check whether segwit is active here. Also,
    TestBlockValidity is run on the block template after it has been
    created.

// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <node/miner.h>

#include <chain.h>
#include <chainparams.h>
#include <coins.h>
#include <consensus/amount.h>
#include <consensus/consensus.h>
#include <consensus/merkle.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <deploymentstatus.h>
#include <policy/feerate.h>
#include <policy/policy.h>
#include <pow.h>
#include <primitives/transaction.h>
#include <timedata.h>
#include <util/moneystr.h>
#include <util/system.h>
#include <validation.h>

#include <algorithm>
#include <utility>

namespace node {
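// Bump the block header's nTime to max(previous block's median-time-past + 1,
// current adjusted time). On chains that allow min-difficulty blocks, also
// recompute nBits, since changing the time can change the work required.
// Returns the difference between the new and the old timestamp.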
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    int64_t nOldTime = pblock->nTime;
    int64_t nNewTime = std::max(pindexPrev->GetMedianTimePast() + 1, GetAdjustedTime());

    if (nOldTime < nNewTime) {
        pblock->nTime = nNewTime;
    }

    // Updating time can change work required on testnet:
    if (consensusParams.fPowAllowMinDifficultyBlocks) {
        pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, consensusParams);
    }

    return nNewTime - nOldTime;
}

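// Rebuild the coinbase witness commitment and the block's merkle root,
// typically after the block's transaction set has been modified: the existing
// commitment output is stripped from the coinbase, a fresh commitment is
// generated against the previous block, and hashMerkleRoot is recomputed.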
void RegenerateCommitments(CBlock& block, ChainstateManager& chainman)
{
    CMutableTransaction tx{*block.vtx.at(0)};
    tx.vout.erase(tx.vout.begin() + GetWitnessCommitmentIndex(block));
    block.vtx.at(0) = MakeTransactionRef(tx);

    CBlockIndex* prev_block = WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock));
    GenerateCoinbaseCommitment(block, prev_block, Params().GetConsensus());

    block.hashMerkleRoot = BlockMerkleRoot(block);
}

BlockAssembler::Options::Options()
{
    blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE);
    nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
}

BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const CChainParams& params, const Options& options)
    : chainparams(params),
      m_mempool(mempool),
      m_chainstate(chainstate)
{
    blockMinFeeRate = options.blockMinFeeRate;
    // Limit weight to between 4K and MAX_BLOCK_WEIGHT-4K for sanity:
    nBlockMaxWeight = std::max<size_t>(4000, std::min<size_t>(MAX_BLOCK_WEIGHT - 4000, options.nBlockMaxWeight));
}

static BlockAssembler::Options DefaultOptions()
{
    // Block resource limits
    // If -blockmaxweight is not given, limit to DEFAULT_BLOCK_MAX_WEIGHT
    BlockAssembler::Options options;
    options.nBlockMaxWeight = gArgs.GetIntArg("-blockmaxweight", DEFAULT_BLOCK_MAX_WEIGHT);
    if (gArgs.IsArgSet("-blockmintxfee")) {
        std::optional<CAmount> parsed = ParseMoney(gArgs.GetArg("-blockmintxfee", ""));
        options.blockMinFeeRate = CFeeRate{parsed.value_or(DEFAULT_BLOCK_MIN_TX_FEE)};
    } else {
        options.blockMinFeeRate = CFeeRate{DEFAULT_BLOCK_MIN_TX_FEE};
    }
    return options;
}

BlockAssembler::BlockAssembler(CChainState& chainstate, const CTxMemPool& mempool, const CChainParams& params)
    : BlockAssembler(chainstate, mempool, params, DefaultOptions()) {}

void BlockAssembler::resetBlock()
{
    inBlock.clear();

    // Reserve space for coinbase tx
    nBlockWeight = 4000;
    nBlockSigOpsCost = 400;

    // These counters do not include coinbase tx
    nBlockTx = 0;
    nFees = 0;
}

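// Construct a new block template on top of the current chain tip: select
// mempool transactions by ancestor-feerate package, build the coinbase paying
// to scriptPubKeyIn, fill in the header fields, and sanity-check the result
// with TestBlockValidity (throws if the template is invalid).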
std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn)
{
    int64_t nTimeStart = GetTimeMicros();

    resetBlock();

    pblocktemplate.reset(new CBlockTemplate());

    if (!pblocktemplate.get()) {
        return nullptr;
    }
    CBlock* const pblock = &pblocktemplate->block; // pointer for convenience

    // Add dummy coinbase tx as first transaction
    pblock->vtx.emplace_back();
    pblocktemplate->vTxFees.push_back(-1); // updated at end
    pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end

    LOCK2(cs_main, m_mempool.cs);
    CBlockIndex* pindexPrev = m_chainstate.m_chain.Tip();
    assert(pindexPrev != nullptr);
    nHeight = pindexPrev->nHeight + 1;

    pblock->nVersion = g_versionbitscache.ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
    // -regtest only: allow overriding block.nVersion with
    // -blockversion=N to test forking scenarios
    if (chainparams.MineBlocksOnDemand()) {
        pblock->nVersion = gArgs.GetIntArg("-blockversion", pblock->nVersion);
    }

    pblock->nTime = GetAdjustedTime();
    m_lock_time_cutoff = pindexPrev->GetMedianTimePast();

    int nPackagesSelected = 0;
    int nDescendantsUpdated = 0;
    addPackageTxs(nPackagesSelected, nDescendantsUpdated);

    int64_t nTime1 = GetTimeMicros();

    m_last_block_num_txs = nBlockTx;
    m_last_block_weight = nBlockWeight;

    // Create coinbase transaction.
    CMutableTransaction coinbaseTx;
    coinbaseTx.vin.resize(1);
    coinbaseTx.vin[0].prevout.SetNull();
    coinbaseTx.vout.resize(1);
    coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;
    coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
    coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
    pblock->vtx[0] = MakeTransactionRef(std::move(coinbaseTx));
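    // The witness commitment is generated unconditionally; per the commit
    // message above, segwit activation is not checked here, and
    // TestBlockValidity below still validates the assembled block.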
    pblocktemplate->vchCoinbaseCommitment = GenerateCoinbaseCommitment(*pblock, pindexPrev, chainparams.GetConsensus());
    pblocktemplate->vTxFees[0] = -nFees;

    LogPrintf("CreateNewBlock(): block weight: %u txs: %u fees: %ld sigops %d\n", GetBlockWeight(*pblock), nBlockTx, nFees, nBlockSigOpsCost);

    // Fill in header
    pblock->hashPrevBlock = pindexPrev->GetBlockHash();
    UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
    pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
    pblock->nNonce = 0;
    pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]);

    BlockValidationState state;
    if (!TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev, false, false)) {
        throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString()));
    }
    int64_t nTime2 = GetTimeMicros();

    LogPrint(BCLog::BENCH, "CreateNewBlock() packages: %.2fms (%d packages, %d updated descendants), validity: %.2fms (total %.2fms)\n", 0.001 * (nTime1 - nTimeStart), nPackagesSelected, nDescendantsUpdated, 0.001 * (nTime2 - nTime1), 0.001 * (nTime2 - nTimeStart));

    return std::move(pblocktemplate);
}

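// Remove from testSet any entries that are already included in the block,
// leaving only transactions that would still be unconfirmed by this block.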
void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
{
    for (CTxMemPool::setEntries::iterator iit = testSet.begin(); iit != testSet.end(); ) {
        // Only test txs not already in the block
        if (inBlock.count(*iit)) {
            testSet.erase(iit++);
        } else {
            iit++;
        }
    }
}

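// Check whether a candidate package fits within the remaining block weight
// and sigop-cost budgets. packageSize is a virtual size and is converted to
// weight with WITNESS_SCALE_FACTOR (see the TODO below).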
bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost) const
{
    // TODO: switch to weight-based accounting for packages instead of vsize-based accounting.
    if (nBlockWeight + WITNESS_SCALE_FACTOR * packageSize >= nBlockMaxWeight) {
        return false;
    }
    if (nBlockSigOpsCost + packageSigOpsCost >= MAX_BLOCK_SIGOPS_COST) {
        return false;
    }
    return true;
}

// Perform transaction-level checks before adding to block:
// - transaction finality (locktime)
bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package) const
{
    for (CTxMemPool::txiter it : package) {
        if (!IsFinalTx(it->GetTx(), nHeight, m_lock_time_cutoff)) {
            return false;
        }
    }
    return true;
}

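// Append a single mempool entry to the block template and update the running
// weight, sigop-cost, fee and transaction counters; optionally log the
// feerate when -printpriority is set.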
void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
{
    pblocktemplate->block.vtx.emplace_back(iter->GetSharedTx());
    pblocktemplate->vTxFees.push_back(iter->GetFee());
    pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost());
    nBlockWeight += iter->GetTxWeight();
    ++nBlockTx;
    nBlockSigOpsCost += iter->GetSigOpCost();
    nFees += iter->GetFee();
    inBlock.insert(iter);

    bool fPrintPriority = gArgs.GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY);
    if (fPrintPriority) {
        LogPrintf("fee rate %s txid %s\n",
                  CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
                  iter->GetTx().GetHash().ToString());
    }
}

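// For each transaction just added to the block, walk its in-mempool
// descendants and add or update their entries in mapModifiedTx so that their
// cached ancestor size, fee and sigop-cost no longer count ancestors that are
// already in the block. Returns the number of descendant entries updated.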
int BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
                                           indexed_modified_transaction_set& mapModifiedTx)
{
    AssertLockHeld(m_mempool.cs);

    int nDescendantsUpdated = 0;
    for (CTxMemPool::txiter it : alreadyAdded) {
        CTxMemPool::setEntries descendants;
        m_mempool.CalculateDescendants(it, descendants);
        // Insert all descendants (not yet in block) into the modified set
        for (CTxMemPool::txiter desc : descendants) {
            if (alreadyAdded.count(desc)) {
                continue;
            }
            ++nDescendantsUpdated;
            modtxiter mit = mapModifiedTx.find(desc);
            if (mit == mapModifiedTx.end()) {
                CTxMemPoolModifiedEntry modEntry(desc);
                modEntry.nSizeWithAncestors -= it->GetTxSize();
                modEntry.nModFeesWithAncestors -= it->GetModifiedFee();
                modEntry.nSigOpCostWithAncestors -= it->GetSigOpCost();
                mapModifiedTx.insert(modEntry);
            } else {
                mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
            }
        }
    }
    return nDescendantsUpdated;
}

// Skip entries in mapTx that are already in a block or are present
// in mapModifiedTx (which implies that the mapTx ancestor state is
// stale due to ancestor inclusion in the block)
// Also skip transactions that we've already failed to add. This can happen if
// we consider a transaction in mapModifiedTx and it fails: we can then
// potentially consider it again while walking mapTx. It's currently
// guaranteed to fail again, but as a belt-and-suspenders check we put it in
// failedTx and avoid re-evaluation, since the re-evaluation would be using
// cached size/sigops/fee values that are not actually correct.
bool BlockAssembler::SkipMapTxEntry(CTxMemPool::txiter it, indexed_modified_transaction_set& mapModifiedTx, CTxMemPool::setEntries& failedTx)
{
    AssertLockHeld(m_mempool.cs);

    assert(it != m_mempool.mapTx.end());
    return mapModifiedTx.count(it) || inBlock.count(it) || failedTx.count(it);
}

void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries)
{
    // Sort package by ancestor count
    // If a transaction A depends on transaction B, then A's ancestor count
    // must be greater than B's. So this is sufficient to validly order the
    // transactions for block inclusion.
    sortedEntries.clear();
    sortedEntries.insert(sortedEntries.begin(), package.begin(), package.end());
    std::sort(sortedEntries.begin(), sortedEntries.end(), CompareTxIterByAncestorCount());
}

// This transaction selection algorithm orders the mempool based
// on feerate of a transaction including all unconfirmed ancestors.
// Since we don't remove transactions from the mempool as we select them
// for block inclusion, we need an alternate method of updating the feerate
// of a transaction with its not-yet-selected ancestors as we go.
// This is accomplished by walking the in-mempool descendants of selected
// transactions and storing a temporary modified state in mapModifiedTx.
// Each time through the loop, we compare the best transaction in
// mapModifiedTx with the next transaction in the mempool to decide what
// transaction package to work on next.
void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpdated)
{
    AssertLockHeld(m_mempool.cs);

    // mapModifiedTx will store sorted packages after they are modified
    // because some of their txs are already in the block
    indexed_modified_transaction_set mapModifiedTx;
    // Keep track of entries that failed inclusion, to avoid duplicate work
    CTxMemPool::setEntries failedTx;

    // Start by adding all descendants of previously added txs to mapModifiedTx
    // and modifying them for their already included ancestors
    UpdatePackagesForAdded(inBlock, mapModifiedTx);

    CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = m_mempool.mapTx.get<ancestor_score>().begin();
    CTxMemPool::txiter iter;

    // Limit the number of attempts to add transactions to the block when it is
    // close to full; this is just a simple heuristic to finish quickly if the
    // mempool has a lot of entries.
    const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
    int64_t nConsecutiveFailed = 0;

    while (mi != m_mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) {
        // First try to find a new transaction in mapTx to evaluate.
        if (mi != m_mempool.mapTx.get<ancestor_score>().end() &&
            SkipMapTxEntry(m_mempool.mapTx.project<0>(mi), mapModifiedTx, failedTx)) {
            ++mi;
            continue;
        }

        // Now that mi is not stale, determine which transaction to evaluate:
        // the next entry from mapTx, or the best from mapModifiedTx?
        bool fUsingModified = false;

        modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
        if (mi == m_mempool.mapTx.get<ancestor_score>().end()) {
            // We're out of entries in mapTx; use the entry from mapModifiedTx
            iter = modit->iter;
            fUsingModified = true;
        } else {
            // Try to compare the mapTx entry to the mapModifiedTx entry
            iter = m_mempool.mapTx.project<0>(mi);
            if (modit != mapModifiedTx.get<ancestor_score>().end() &&
                CompareTxMemPoolEntryByAncestorFee()(*modit, CTxMemPoolModifiedEntry(iter))) {
                // The best entry in mapModifiedTx has higher score
                // than the one from mapTx.
                // Switch which transaction (package) to consider
                iter = modit->iter;
                fUsingModified = true;
            } else {
                // Either no entry in mapModifiedTx, or it's worse than mapTx.
                // Increment mi for the next loop iteration.
                ++mi;
            }
        }

        // We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
        // contain anything that is inBlock.
        assert(!inBlock.count(iter));

        uint64_t packageSize = iter->GetSizeWithAncestors();
        CAmount packageFees = iter->GetModFeesWithAncestors();
        int64_t packageSigOpsCost = iter->GetSigOpCostWithAncestors();
        if (fUsingModified) {
            packageSize = modit->nSizeWithAncestors;
            packageFees = modit->nModFeesWithAncestors;
            packageSigOpsCost = modit->nSigOpCostWithAncestors;
        }

        if (packageFees < blockMinFeeRate.GetFee(packageSize)) {
            // Everything else we might consider has a lower fee rate
            return;
        }

        if (!TestPackage(packageSize, packageSigOpsCost)) {
            if (fUsingModified) {
                // Since we always look at the best entry in mapModifiedTx,
                // we must erase failed entries so that we can consider the
                // next best entry on the next loop iteration
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter);
            }

            ++nConsecutiveFailed;

            if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight >
                    nBlockMaxWeight - 4000) {
                // Give up if we're close to full and haven't succeeded in a while
                break;
            }
            continue;
        }

        CTxMemPool::setEntries ancestors;
        uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
        std::string dummy;
        m_mempool.CalculateMemPoolAncestors(*iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);

        onlyUnconfirmed(ancestors);
        ancestors.insert(iter);

        // Test if all tx's are Final
        if (!TestPackageTransactions(ancestors)) {
            if (fUsingModified) {
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter);
            }
            continue;
        }

        // This transaction will make it in; reset the failed counter.
        nConsecutiveFailed = 0;

        // Package can be added. Sort the entries in a valid order.
        std::vector<CTxMemPool::txiter> sortedEntries;
        SortForBlock(ancestors, sortedEntries);

        for (size_t i = 0; i < sortedEntries.size(); ++i) {
            AddToBlock(sortedEntries[i]);
            // Erase from the modified set, if present
            mapModifiedTx.erase(sortedEntries[i]);
        }

        ++nPackagesSelected;

        // Update transactions that depend on each of these
        nDescendantsUpdated += UpdatePackagesForAdded(ancestors, mapModifiedTx);
    }
}

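// Update the extra nonce in the coinbase scriptSig and recompute the merkle
// root. The extra nonce resets whenever hashPrevBlock changes, i.e. when work
// begins on top of a new previous block.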
void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
{
    // Update nExtraNonce
    static uint256 hashPrevBlock;
    if (hashPrevBlock != pblock->hashPrevBlock) {
        nExtraNonce = 0;
        hashPrevBlock = pblock->hashPrevBlock;
    }
    ++nExtraNonce;
    unsigned int nHeight = pindexPrev->nHeight + 1; // Height first in coinbase required for block.version=2
    CMutableTransaction txCoinbase(*pblock->vtx[0]);
    txCoinbase.vin[0].scriptSig = (CScript() << nHeight << CScriptNum(nExtraNonce));
    assert(txCoinbase.vin[0].scriptSig.size() <= 100);

    pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
    pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
}
} // namespace node