Suhas Daftuar 2024-04-29 04:28:31 +02:00 committed by GitHub
commit 24bd4714b5
58 changed files with 5400 additions and 2747 deletions

configure.ac

@ -969,6 +969,48 @@ AC_CHECK_DECLS([setsid])
AC_CHECK_DECLS([pipe2])
AC_CHECK_FUNCS([timingsafe_bcmp])
AC_MSG_CHECKING([for __builtin_clzl])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
(void) __builtin_clzl(0);
]])],
[ AC_MSG_RESULT([yes]); have_clzl=yes; AC_DEFINE([HAVE_BUILTIN_CLZL], [1], [Define this symbol if you have __builtin_clzl])],
[ AC_MSG_RESULT([no]); have_clzl=no;]
)
AC_MSG_CHECKING([for __builtin_clzll])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
(void) __builtin_clzll(0);
]])],
[ AC_MSG_RESULT([yes]); have_clzll=yes; AC_DEFINE([HAVE_BUILTIN_CLZLL], [1], [Define this symbol if you have __builtin_clzll])],
[ AC_MSG_RESULT([no]); have_clzll=no;]
)
AC_MSG_CHECKING([for __builtin_ctz])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
(void) __builtin_ctz(0);
]])],
[ AC_MSG_RESULT([yes]); have_ctz=yes; AC_DEFINE([HAVE_BUILTIN_CTZ], [1], [Define this symbol if you have __builtin_ctz])],
[ AC_MSG_RESULT([no]); have_ctz=no;]
)
AC_MSG_CHECKING([for __builtin_ctzl])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
(void) __builtin_ctzl(0);
]])],
[ AC_MSG_RESULT([yes]); have_ctzl=yes; AC_DEFINE([HAVE_BUILTIN_CTZL], [1], [Define this symbol if you have __builtin_ctzl])],
[ AC_MSG_RESULT([no]); have_ctzl=no;]
)
AC_MSG_CHECKING([for __builtin_ctzll])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
(void) __builtin_ctzll(0);
]])],
[ AC_MSG_RESULT([yes]); have_ctzll=yes; AC_DEFINE([HAVE_BUILTIN_CTZLL], [1], [Define this symbol if you have __builtin_ctzll])],
[ AC_MSG_RESULT([no]); have_ctzll=no;]
)
dnl Check for malloc_info (for memory statistics information in getmemoryinfo)
AC_MSG_CHECKING([for getmemoryinfo])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <malloc.h>]],
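For context (this usage pattern is not part of the diff): the HAVE_BUILTIN_* symbols defined by the checks above are typically consumed with a compile-time fallback, roughly along these lines.

```cpp
#include <cstdint>

// Count trailing zero bits of a non-zero 64-bit value.
static inline int CountTrailingZeroes(uint64_t x)
{
    // __builtin_ctzll(0) is undefined, so callers must ensure x != 0.
#ifdef HAVE_BUILTIN_CTZLL
    return __builtin_ctzll(x);
#else
    // Portable fallback when the builtin is unavailable.
    int n = 0;
    while ((x & 1) == 0) { x >>= 1; ++n; }
    return n;
#endif
}
```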

doc/policy/mempool-replacements.md

@ -19,12 +19,7 @@ other consensus and policy rules, each of the following conditions are met:
Use the (`-mempoolfullrbf`) configuration option to allow transaction replacement without enforcement of the
opt-in signaling rule.
2. The replacement transaction may only include an unconfirmed input if that input was included in
one of the directly conflicting transactions. An unconfirmed input spends an output from a
currently-unconfirmed transaction.
*Rationale*: When RBF was originally implemented, the mempool did not keep track of
ancestor feerates yet. This rule was suggested as a temporary restriction.
2. [REDACTED]
3. The replacement transaction pays an absolute fee of at least the sum paid by the original
transactions.
@ -45,23 +40,16 @@ other consensus and policy rules, each of the following conditions are met:
*Rationale*: Try to prevent DoS attacks where an attacker causes the network to repeatedly relay
transactions each paying a tiny additional amount in fees, e.g. just 1 satoshi.
5. The number of original transactions does not exceed 100. More precisely, the sum of all
directly conflicting transactions' descendant counts (number of transactions inclusive of itself
and its descendants) must not exceed 100; it is possible that this overestimates the true number
of original transactions.
5. The number of directly conflicting transactions does not exceed 100.
*Rationale*: Try to prevent DoS attacks where an attacker is able to easily occupy and flush out
significant portions of the node's mempool using replacements with multiple directly conflicting
transactions, each with large descendant sets.
*Rationale*: Limit CPU usage required to update the mempool for so many transactions being
removed at once.
6. The replacement transaction's feerate is greater than the feerates of all directly conflicting
transactions.
6. The feerate diagram of the mempool must be strictly improved by the replacement transaction.
*Rationale*: This rule was originally intended to ensure that the replacement transaction is
preferable for block-inclusion, compared to what would be removed from the mempool. This rule
predates ancestor feerate-based transaction selection.
*Rationale*: This ensures that block fees in all future blocks will go up
after the replacement (ignoring tail effects at the end of a block).
This set of rules is similar but distinct from BIP125.
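To make rule 6 concrete: a feerate diagram plots cumulative fee against cumulative size as chunks are taken from the mempool in mining order, and "strictly improved" means the new diagram is nowhere below the old one and strictly above it somewhere. The following is a minimal sketch of that comparison, not taken from this PR, and it assumes both diagrams are already evaluated at the same cumulative sizes (the actual check operates on FeeFrac-based diagrams and handles unequal sampling).

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// One cumulative point on a feerate diagram: total fee collected after
// `size` virtual bytes of chunks have been included.
struct DiagramPoint {
    int64_t fee{0};
    int64_t size{0};
};

// True if `after` is never below `before` and strictly above it at least once,
// assuming both diagrams are sampled at the same cumulative sizes.
bool StrictlyImproved(const std::vector<DiagramPoint>& before,
                      const std::vector<DiagramPoint>& after)
{
    bool better_somewhere = false;
    const size_t n = std::min(before.size(), after.size());
    for (size_t i = 0; i < n; ++i) {
        if (after[i].fee < before[i].fee) return false;
        if (after[i].fee > before[i].fee) better_somewhere = true;
    }
    return better_somewhere;
}
```

In this commit the two diagrams being compared are produced by the new TxGraphChangeSet::GetFeerateDiagramOld and GetFeerateDiagramNew methods (see src/kernel/txgraph.h below).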
## History
@ -80,3 +68,5 @@ This set of rules is similar but distinct from BIP125.
* Full replace-by-fee enabled as a configurable mempool policy as of **v24.0** ([PR
#25353](https://github.com/bitcoin/bitcoin/pull/25353)).
* Feerate diagram policy enabled in conjunction with switch to cluster mempool as of **v??.0**.

src/Makefile.am

@ -132,6 +132,7 @@ BITCOIN_CORE_H = \
chainparamsseeds.h \
checkqueue.h \
clientversion.h \
cluster_linearize.h \
coins.h \
common/args.h \
common/bloom.h \
@ -192,6 +193,7 @@ BITCOIN_CORE_H = \
kernel/mempool_removal_reason.h \
kernel/messagestartchars.h \
kernel/notifications_interface.h \
kernel/txgraph.h \
kernel/validation_cache_sizes.h \
key.h \
key_io.h \
@ -292,6 +294,7 @@ BITCOIN_CORE_H = \
util/batchpriority.h \
util/bip32.h \
util/bitdeque.h \
util/bitset.h \
util/bytevectorhash.h \
util/chaintype.h \
util/check.h \
@ -408,6 +411,7 @@ libbitcoin_node_a_SOURCES = \
kernel/disconnected_transactions.cpp \
kernel/mempool_persist.cpp \
kernel/mempool_removal_reason.cpp \
kernel/txgraph.cpp \
mapport.cpp \
net.cpp \
net_processing.cpp \
@ -945,6 +949,7 @@ libbitcoinkernel_la_SOURCES = \
kernel/disconnected_transactions.cpp \
kernel/mempool_persist.cpp \
kernel/mempool_removal_reason.cpp \
kernel/txgraph.cpp \
key.cpp \
logging.cpp \
node/blockstorage.cpp \

src/Makefile.test.include

@ -157,6 +157,7 @@ BITCOIN_TESTS =\
test/torcontrol_tests.cpp \
test/transaction_tests.cpp \
test/translation_tests.cpp \
test/txgraph_tests.cpp \
test/txindex_tests.cpp \
test/txpackage_tests.cpp \
test/txreconciliation_tests.cpp \
@ -390,6 +391,7 @@ test_fuzz_fuzz_SOURCES = \
test/fuzz/tx_in.cpp \
test/fuzz/tx_out.cpp \
test/fuzz/tx_pool.cpp \
test/fuzz/txgraph.cpp \
test/fuzz/txorphan.cpp \
test/fuzz/txrequest.cpp \
test/fuzz/utxo_snapshot.cpp \

src/bench/mempool_stress.cpp

@ -13,7 +13,7 @@
#include <vector>
static void AddTx(const CTransactionRef& tx, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs)
static void AddTx(const CTransactionRef& tx, CTxMemPool& pool, FastRandomContext& det_rand) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs)
{
int64_t nTime = 0;
unsigned int nHeight = 1;
@ -21,7 +21,7 @@ static void AddTx(const CTransactionRef& tx, CTxMemPool& pool) EXCLUSIVE_LOCKS_R
bool spendsCoinbase = false;
unsigned int sigOpCost = 4;
LockPoints lp;
pool.addUnchecked(CTxMemPoolEntry(tx, 1000, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp));
pool.addUnchecked(CTxMemPoolEntry(tx, det_rand.randrange(10000)+1000, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp));
}
struct Available {
@ -31,15 +31,17 @@ struct Available {
Available(CTransactionRef& ref, size_t tx_count) : ref(ref), tx_count(tx_count){}
};
static std::vector<CTransactionRef> CreateOrderedCoins(FastRandomContext& det_rand, int childTxs, int min_ancestors)
// Create a cluster of transactions, randomly.
static std::vector<CTransactionRef> CreateCoinCluster(FastRandomContext& det_rand, int childTxs, int min_ancestors)
{
std::vector<Available> available_coins;
std::vector<CTransactionRef> ordered_coins;
// Create some base transactions
size_t tx_counter = 1;
for (auto x = 0; x < 100; ++x) {
for (auto x = 0; x < 10; ++x) {
CMutableTransaction tx = CMutableTransaction();
tx.vin.resize(1);
tx.vin[0].prevout = COutPoint(Txid::FromUint256(GetRandHash()), 1);
tx.vin[0].scriptSig = CScript() << CScriptNum(tx_counter);
tx.vin[0].scriptWitness.stack.push_back(CScriptNum(x).getvch());
tx.vout.resize(det_rand.randrange(10)+2);
@ -83,26 +85,104 @@ static std::vector<CTransactionRef> CreateOrderedCoins(FastRandomContext& det_ra
return ordered_coins;
}
static void ComplexMemPool(benchmark::Bench& bench)
static void MemPoolAddTransactions(benchmark::Bench& bench)
{
FastRandomContext det_rand{true};
int childTxs = 800;
int childTxs = 90;
if (bench.complexityN() > 1) {
childTxs = static_cast<int>(bench.complexityN());
}
std::vector<CTransactionRef> ordered_coins = CreateOrderedCoins(det_rand, childTxs, /*min_ancestors=*/1);
const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(ChainType::MAIN);
CTxMemPool& pool = *testing_setup.get()->m_node.mempool;
std::vector<CTransactionRef> transactions;
// Create 100 clusters of roughly 100 transactions each
for (int i=0; i<100; i++) {
auto new_txs = CreateCoinCluster(det_rand, childTxs, /*min_ancestors*/ 1);
transactions.insert(transactions.end(), new_txs.begin(), new_txs.end());
}
LOCK2(cs_main, pool.cs);
bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
for (auto& tx : ordered_coins) {
AddTx(tx, pool);
for (auto& tx : transactions) {
AddTx(tx, pool, det_rand);
}
pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4);
pool.TrimToSize(GetVirtualTransactionSize(*ordered_coins.front()));
pool.TrimToSize(0, nullptr);
});
}
static void ComplexMemPool(benchmark::Bench& bench)
{
FastRandomContext det_rand{true};
int childTxs = 90;
if (bench.complexityN() > 1) {
childTxs = static_cast<int>(bench.complexityN());
}
const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(ChainType::MAIN);
CTxMemPool& pool = *testing_setup.get()->m_node.mempool;
std::vector<CTransactionRef> tx_remove_for_block;
std::vector<uint256> hashes_remove_for_block;
LOCK2(cs_main, pool.cs);
for (int i=0; i<1000; i++) {
std::vector<CTransactionRef> transactions = CreateCoinCluster(det_rand, childTxs, /*min_ancestors=*/1);
// Add all transactions to the mempool.
// Also store the first 10 transactions from each cluster as the
// transactions we'll "mine" in the benchmark.
int tx_count = 0;
for (auto& tx : transactions) {
if (tx_count < 10) {
tx_remove_for_block.push_back(tx);
++tx_count;
hashes_remove_for_block.emplace_back(tx->GetHash());
}
AddTx(tx, pool, det_rand);
}
}
// Since the benchmark will be run repeatedly, we have to leave the mempool
// in the same state at the end of the function, so we benchmark both
// mining a block and reorging the block's contents back into the mempool.
bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
pool.removeForBlock(tx_remove_for_block, /*nBlockHeight*/100);
for (auto& tx: tx_remove_for_block) {
AddTx(tx, pool, det_rand);
}
pool.UpdateTransactionsFromBlock(hashes_remove_for_block);
});
}
static void MemPoolAncestorsDescendants(benchmark::Bench& bench)
{
FastRandomContext det_rand{true};
int childTxs = 90;
if (bench.complexityN() > 1) {
childTxs = static_cast<int>(bench.complexityN());
}
const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(ChainType::MAIN);
CTxMemPool& pool = *testing_setup.get()->m_node.mempool;
LOCK2(cs_main, pool.cs);
std::vector<CTransactionRef> transactions = CreateCoinCluster(det_rand, childTxs, /*min_ancestors=*/1);
for (auto& tx : transactions) {
AddTx(tx, pool, det_rand);
}
CTxMemPool::txiter first_tx = *pool.GetIter(transactions[0]->GetHash());
CTxMemPool::txiter last_tx = *pool.GetIter(transactions.back()->GetHash());
bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
ankerl::nanobench::doNotOptimizeAway(pool.CalculateDescendants({first_tx}));
ankerl::nanobench::doNotOptimizeAway(pool.CalculateMemPoolAncestors(*last_tx, false));
});
}
static void MempoolCheck(benchmark::Bench& bench)
{
FastRandomContext det_rand{true};
@ -112,11 +192,73 @@ static void MempoolCheck(benchmark::Bench& bench)
testing_setup->PopulateMempool(det_rand, 400, true);
const CCoinsViewCache& coins_tip = testing_setup.get()->m_node.chainman->ActiveChainstate().CoinsTip();
CTxMemPool::Limits limits(kernel::MemPoolLimits::NoLimits());
bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
// Bump up the spendheight so we don't hit premature coinbase spend errors.
pool.check(coins_tip, /*spendheight=*/300);
pool.check(coins_tip, /*spendheight=*/300, &limits);
});
}
#if 0
static void MemPoolMiningScoreCheck(benchmark::Bench& bench)
{
// Default test: each cluster is of size 20, and we'll try to RBF with a
// transaction that merges 10 clusters, evicting 10 transactions from each.
FastRandomContext det_rand{true};
int childTxs = 10;
if (bench.complexityN() > 1) {
childTxs = static_cast<int>(bench.complexityN());
}
const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(ChainType::MAIN);
CTxMemPool& pool = *testing_setup.get()->m_node.mempool;
LOCK2(cs_main, pool.cs);
std::vector<CTransactionRef> parent_txs_for_rbf;
std::set<uint256> child_txs_to_conflict_with;
for (int i=0; i<10; i++) {
std::vector<CTransactionRef> transactions = CreateCoinCluster(det_rand, childTxs, /*min_ancestors=*/1);
parent_txs_for_rbf.push_back(transactions[0]);
// Conflict with everything after the first 10 transactions
for (size_t j=10; j<transactions.size(); ++j) {
child_txs_to_conflict_with.insert(transactions[j]->GetHash());
}
// Add all transactions to the mempool.
for (auto& tx : transactions) {
AddTx(tx, pool, det_rand);
}
}
// Construct a transaction that spends from each of the parent transactions
// selected.
CMutableTransaction tx = CMutableTransaction();
tx.vin.resize(10);
for (size_t i=0; i<parent_txs_for_rbf.size(); ++i) {
tx.vin[i].prevout = COutPoint(parent_txs_for_rbf[i]->GetHash(), 0);
tx.vin[i].scriptSig = CScript() << i;
}
tx.vout.resize(1);
for (auto& out : tx.vout) {
out.scriptPubKey = CScript() << CScriptNum(det_rand.randrange(19)+1) << OP_EQUAL;
out.nValue = 10 * COIN;
}
CTxMemPool::setEntries all_conflicts = pool.GetIterSet(child_txs_to_conflict_with);
CTxMemPoolEntry entry(MakeTransactionRef(tx), det_rand.randrange(10000)+1000, 0, 1, 0, false, 4, LockPoints());
bench.run([&]() NO_THREAD_SAFETY_ANALYSIS {
CTxMemPool::Limits limits(pool.m_limits);
pool.CalculateMiningScoreOfReplacementTx(entry, det_rand.randrange(30000)+1000, all_conflicts, limits);
});
}
#endif
BENCHMARK(MemPoolAncestorsDescendants, benchmark::PriorityLevel::HIGH);
BENCHMARK(MemPoolAddTransactions, benchmark::PriorityLevel::HIGH);
BENCHMARK(ComplexMemPool, benchmark::PriorityLevel::HIGH);
BENCHMARK(MempoolCheck, benchmark::PriorityLevel::HIGH);
//BENCHMARK(MemPoolMiningScoreCheck, benchmark::PriorityLevel::HIGH);

src/cluster_linearize.h (new file, 1178 lines; diff suppressed because it is too large)

src/init.cpp

@ -612,6 +612,9 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-test=<option>", "Pass a test-only option. Options include : " + Join(TEST_OPTIONS_DOC, ", ") + ".", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-limitclustercount=<n>", strprintf("Do not accept transactions connected to <n> or more existing in-mempool transactions (default: %u)", DEFAULT_CLUSTER_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-limitclustersize=<n>", strprintf("Do not accept transactions whose size with all in-mempool connected transactions exceeds <n> kilobytes (default: %u)", DEFAULT_CLUSTER_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-capturemessages", "Capture all P2P messages to disk", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_BYTES >> 20), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);

src/kernel/mempool_entry.h

@ -8,6 +8,7 @@
#include <consensus/amount.h>
#include <consensus/validation.h>
#include <core_memusage.h>
#include <kernel/txgraph.h>
#include <policy/policy.h>
#include <policy/settings.h>
#include <primitives/transaction.h>
@ -62,13 +63,10 @@ struct CompareIteratorByHash {
*
*/
class CTxMemPoolEntry
class CTxMemPoolEntry : public TxEntry
{
public:
typedef std::reference_wrapper<const CTxMemPoolEntry> CTxMemPoolEntryRef;
// two aliases, should the types ever diverge
typedef std::set<CTxMemPoolEntryRef, CompareIteratorByHash> Parents;
typedef std::set<CTxMemPoolEntryRef, CompareIteratorByHash> Children;
private:
CTxMemPoolEntry(const CTxMemPoolEntry&) = default;
@ -77,8 +75,6 @@ private:
};
const CTransactionRef tx;
mutable Parents m_parents;
mutable Children m_children;
const CAmount nFee; //!< Cached to avoid expensive parent-transaction lookups
const int32_t nTxWeight; //!< ... and avoid recomputing tx weight (also used for GetTxSize())
const size_t nUsageSize; //!< ... and total memory usage
@ -87,30 +83,15 @@ private:
const unsigned int entryHeight; //!< Chain height when entering the mempool
const bool spendsCoinbase; //!< keep track of transactions that spend a coinbase
const int64_t sigOpCost; //!< Total sigop cost
CAmount m_modified_fee; //!< Used for determining the priority of the transaction for mining in a block
mutable LockPoints lockPoints; //!< Track the height and time at which tx was final
// Information about descendants of this transaction that are in the
// mempool; if we remove this transaction we must remove all of these
// descendants as well.
int64_t m_count_with_descendants{1}; //!< number of descendant transactions
// Using int64_t instead of int32_t to avoid signed integer overflow issues.
int64_t nSizeWithDescendants; //!< ... and size
CAmount nModFeesWithDescendants; //!< ... and total fees (all including us)
// Analogous statistics for ancestor transactions
int64_t m_count_with_ancestors{1};
// Using int64_t instead of int32_t to avoid signed integer overflow issues.
int64_t nSizeWithAncestors;
CAmount nModFeesWithAncestors;
int64_t nSigOpCostWithAncestors;
public:
CTxMemPoolEntry(const CTransactionRef& tx, CAmount fee,
int64_t time, unsigned int entry_height, uint64_t entry_sequence,
bool spends_coinbase,
int64_t sigops_cost, LockPoints lp)
: tx{tx},
: TxEntry(GetVirtualTransactionSize(GetTransactionWeight(*tx), sigops_cost, ::nBytesPerSigOp), fee),
tx{tx},
nFee{fee},
nTxWeight{GetTransactionWeight(*tx)},
nUsageSize{RecursiveDynamicUsage(tx)},
@ -119,13 +100,7 @@ public:
entryHeight{entry_height},
spendsCoinbase{spends_coinbase},
sigOpCost{sigops_cost},
m_modified_fee{nFee},
lockPoints{lp},
nSizeWithDescendants{GetTxSize()},
nModFeesWithDescendants{nFee},
nSizeWithAncestors{GetTxSize()},
nModFeesWithAncestors{nFee},
nSigOpCostWithAncestors{sigOpCost} {}
lockPoints{lp} {}
CTxMemPoolEntry(ExplicitCopyTag, const CTxMemPoolEntry& entry) : CTxMemPoolEntry(entry) {}
CTxMemPoolEntry& operator=(const CTxMemPoolEntry&) = delete;
@ -137,10 +112,6 @@ public:
const CTransaction& GetTx() const { return *this->tx; }
CTransactionRef GetSharedTx() const { return this->tx; }
const CAmount& GetFee() const { return nFee; }
int32_t GetTxSize() const
{
return GetVirtualTransactionSize(nTxWeight, sigOpCost, ::nBytesPerSigOp);
}
int32_t GetTxWeight() const { return nTxWeight; }
std::chrono::seconds GetTime() const { return std::chrono::seconds{nTime}; }
unsigned int GetHeight() const { return entryHeight; }
@ -150,15 +121,9 @@ public:
size_t DynamicMemoryUsage() const { return nUsageSize; }
const LockPoints& GetLockPoints() const { return lockPoints; }
// Adjusts the descendant state.
void UpdateDescendantState(int32_t modifySize, CAmount modifyFee, int64_t modifyCount);
// Adjusts the ancestor state
void UpdateAncestorState(int32_t modifySize, CAmount modifyFee, int64_t modifyCount, int64_t modifySigOps);
// Updates the modified fees with descendants/ancestors.
void UpdateModifiedFee(CAmount fee_diff)
{
nModFeesWithDescendants = SaturatingAdd(nModFeesWithDescendants, fee_diff);
nModFeesWithAncestors = SaturatingAdd(nModFeesWithAncestors, fee_diff);
m_modified_fee = SaturatingAdd(m_modified_fee, fee_diff);
}
@ -168,24 +133,13 @@ public:
lockPoints = lp;
}
uint64_t GetCountWithDescendants() const { return m_count_with_descendants; }
int64_t GetSizeWithDescendants() const { return nSizeWithDescendants; }
CAmount GetModFeesWithDescendants() const { return nModFeesWithDescendants; }
bool GetSpendsCoinbase() const { return spendsCoinbase; }
uint64_t GetCountWithAncestors() const { return m_count_with_ancestors; }
int64_t GetSizeWithAncestors() const { return nSizeWithAncestors; }
CAmount GetModFeesWithAncestors() const { return nModFeesWithAncestors; }
int64_t GetSigOpCostWithAncestors() const { return nSigOpCostWithAncestors; }
const Parents& GetMemPoolParentsConst() const { return m_parents; }
const Children& GetMemPoolChildrenConst() const { return m_children; }
Parents& GetMemPoolParents() const { return m_parents; }
Children& GetMemPoolChildren() const { return m_children; }
// XXX: we should move all topology calculations into the mempool, and
// eliminate this accessor. This is only needed for v3_policy checks, which
// could be reimplemented within the mempool itself.
mutable size_t idx_randomized; //!< Index in mempool's txns_randomized
mutable Epoch::Marker m_epoch_marker; //!< epoch when last touched, useful for graph algorithms
mutable Epoch::Marker mempool_epoch_marker; //!< epoch when last touched
};
using CTxMemPoolEntryRef = CTxMemPoolEntry::CTxMemPoolEntryRef;

src/kernel/mempool_limits.h

@ -4,6 +4,7 @@
#ifndef BITCOIN_KERNEL_MEMPOOL_LIMITS_H
#define BITCOIN_KERNEL_MEMPOOL_LIMITS_H
#include <kernel/txgraph.h>
#include <policy/policy.h>
#include <cstdint>
@ -15,7 +16,7 @@ namespace kernel {
*
* Most of the time, this struct should be referenced as CTxMemPool::Limits.
*/
struct MemPoolLimits {
struct MemPoolLimits : GraphLimits {
//! The maximum allowed number of transactions in a package including the entry and its ancestors.
int64_t ancestor_count{DEFAULT_ANCESTOR_LIMIT};
//! The maximum allowed size in virtual bytes of an entry and its ancestors within a package.
@ -31,7 +32,7 @@ struct MemPoolLimits {
static constexpr MemPoolLimits NoLimits()
{
int64_t no_limit{std::numeric_limits<int64_t>::max()};
return {no_limit, no_limit, no_limit, no_limit};
return {{no_limit, no_limit}, no_limit, no_limit, no_limit, no_limit};
}
};
} // namespace kernel

src/kernel/txgraph.cpp (new file, 1352 lines; diff suppressed because it is too large)

src/kernel/txgraph.h (new file, 387 lines)

@ -0,0 +1,387 @@
#ifndef BITCOIN_KERNEL_TXGRAPH_H
#define BITCOIN_KERNEL_TXGRAPH_H
#include <util/feefrac.h>
#include <policy/feerate.h>
#include <consensus/amount.h>
#include <policy/policy.h>
#include <util/epochguard.h>
#include <memusage.h>
#include <sync.h>
#include <util/check.h>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/identity.hpp>
#include <boost/multi_index/indexed_by.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index/tag.hpp>
#include <boost/multi_index/member.hpp>
#include <atomic>
#include <functional>
#include <list>
#include <set>
#include <utility>
using namespace std;
class TxGraphCluster;
class TxGraph;
static std::atomic<int64_t> unique_id_counter{0};
class TxEntry {
public:
typedef std::reference_wrapper<const TxEntry> TxEntryRef;
typedef std::pair<size_t, std::list<TxEntryRef>::iterator> Location;
struct CompareById {
bool operator()(const TxEntryRef& a, const TxEntryRef& b) const {
return a.get().unique_id < b.get().unique_id;
}
};
typedef std::set<TxEntryRef, CompareById> TxEntryParents;
typedef std::set<TxEntryRef, CompareById> TxEntryChildren;
TxEntry(int32_t vsize, CAmount modified_fee)
: m_virtual_size(vsize), m_modified_fee(modified_fee) {}
virtual ~TxEntry() {}
int64_t unique_id{++unique_id_counter};
int32_t m_virtual_size;
CAmount m_modified_fee; //!< Tx fee (including prioritisetransaction effects)
int32_t GetTxSize() const { return m_virtual_size; }
CAmount GetModifiedFee() const { return m_modified_fee; }
TxEntryParents& GetTxEntryParents() const { return parents; }
TxEntryChildren& GetTxEntryChildren() const { return children; }
mutable Location m_loc; //!< Location within a cluster
mutable TxGraphCluster *m_cluster{nullptr}; //! The cluster this entry belongs to
private:
// Note: It's a little weird to store parent and children information in
// the TxEntry, because the notion of which transactions are connected is
// one that exists at the cluster/graph level, rather than the transaction
// level. In particular, if a transaction is being evaluated for RBF, then
// it's possible that some other transaction (eg a common parent) might
// have two different possible descendants, depending on which transaction
// ultimately is accepted to the mempool.
// Fortunately, for now this implementation doesn't rely on child
// information, only parent information, to invoke the
// cluster_linearize sorting algorithm. Since parent information is correct
// and currently unambiguous for RBF evaluation, this implementation should
// work, but this could break in the future if (eg) we wanted to implement
// RBF'ing a transaction with some other transaction that had the same txid
// (eg smaller witness replacement, where child transactions would not need
// to be evicted).
// Maybe sipa's implementation will move this information from the
// transaction to the cluster, and eliminate this confusion?
mutable TxEntryParents parents;
mutable TxEntryChildren children;
mutable Epoch::Marker m_epoch_marker; //!< epoch when last touched
friend class TxGraph;
};
class TxGraphCluster {
public:
TxGraphCluster(int64_t id, TxGraph *tx_graph) : m_id(id), m_tx_graph(tx_graph) {}
void Clear() {
m_chunks.clear();
m_tx_count = 0;
}
// Add a transaction and update the sort.
void AddTransaction(const TxEntry& entry, bool sort);
void RemoveTransaction(const TxEntry& entry);
// Sort the cluster and partition into chunks.
void Sort(bool reassign_locations = true);
// Just rechunk the cluster using its existing linearization.
void Rechunk();
// Permanently assign transactions to this cluster
void AssignTransactions();
// Sanity checks -- verify metadata matches and clusters are topo-sorted.
bool Check() const;
bool CheckTopo() const;
private:
// Helper function
void RechunkFromLinearization(std::vector<TxEntry::TxEntryRef>& txs, bool reassign_locations);
public:
void Merge(std::vector<TxGraphCluster *>::iterator first, std::vector<TxGraphCluster*>::iterator last, bool this_cluster_first);
void MergeCopy(std::vector<TxGraphCluster *>::const_iterator first, std::vector<TxGraphCluster*>::const_iterator last);
uint64_t GetMemoryUsage() const {
return memusage::DynamicUsage(m_chunks) + m_tx_count * sizeof(void*) * 3;
}
TxEntry::TxEntryRef GetLastTransaction();
// The chunks of transactions which will be added to blocks or
// evicted from the mempool.
struct Chunk {
Chunk(CAmount _fee, int64_t _size) : feerate(_fee, _size) {}
Chunk(Chunk&& other) = default;
Chunk& operator=(TxGraphCluster::Chunk&& other) = default;
Chunk& operator=(const TxGraphCluster::Chunk& other) = delete;
FeeFrac feerate; // The fee/size of this chunk
std::list<TxEntry::TxEntryRef> txs;
};
typedef std::vector<Chunk>::iterator ChunkIter;
typedef std::pair<ChunkIter, TxGraphCluster*> HeapEntry;
std::vector<Chunk> m_chunks;
int64_t m_tx_count{0};
int64_t m_tx_size{0};
const int64_t m_id;
mutable Epoch::Marker m_epoch_marker; //!< epoch when last touched
TxGraph *m_tx_graph{nullptr};
};
class Trimmer {
public:
Trimmer(TxGraph *tx_graph);
~Trimmer();
CFeeRate RemoveWorstChunk(std::vector<TxEntry::TxEntryRef>& txs_to_remove);
private:
std::set<TxGraphCluster*> clusters_with_evictions;
TxGraph *m_tx_graph{nullptr};
};
class TxSelector {
public:
TxSelector(const TxGraph *tx_graph);
~TxSelector();
// Return the feerate of the next-best chunk in the mempool, filling txs with its transactions.
FeeFrac SelectNextChunk(std::vector<TxEntry::TxEntryRef>& txs);
// If the transactions were successfully used, then notify the TxSelector
// to keep selecting transactions from the same cluster.
void Success();
static bool ChunkCompare(const TxGraphCluster::HeapEntry& a, const TxGraphCluster::HeapEntry& b) {
return a.first->feerate < b.first->feerate;
}
private:
std::vector<TxGraphCluster::HeapEntry> heap_chunks;
const TxGraph *m_tx_graph{nullptr};
TxGraphCluster::HeapEntry m_last_entry_selected{TxGraphCluster::ChunkIter(), nullptr};
};
struct GraphLimits {
//! The maximum number of transactions in a cluster.
int64_t cluster_count{DEFAULT_CLUSTER_LIMIT};
//! The maximum allowed size in virtual bytes of a cluster.
int64_t cluster_size_vbytes{DEFAULT_CLUSTER_SIZE_LIMIT_KVB*1'000};
};
class TxGraphChangeSet {
public:
TxGraphChangeSet(TxGraph *tx_graph, GraphLimits limits, const std::vector<TxEntry::TxEntryRef>& to_remove);
~TxGraphChangeSet();
// Returns failure if a cluster size limit would be hit.
bool AddTx(TxEntry::TxEntryRef tx, const std::vector<TxEntry::TxEntryRef> parents);
void GetFeerateDiagramOld(std::vector<FeeFrac> &diagram);
void GetFeerateDiagramNew(std::vector<FeeFrac> &diagram);
void Apply(); // Apply this changeset to the txgraph, adding/removing
// transactions and clusters as needed.
private:
void GetFeerateDiagram(std::vector<FeeFrac> &diagram, const std::vector<TxGraphCluster*>& clusters);
void SortNewClusters();
TxGraph *m_tx_graph{nullptr};
GraphLimits m_limits;
std::map<int64_t, TxGraphCluster *> m_tx_to_cluster_map; // map entries to their new clusters
std::set<int64_t> m_new_clusters; // cluster id's of the new clusters
std::vector<TxEntry::TxEntryRef> m_txs_to_add;
std::vector<TxEntry::TxEntryRef> m_txs_to_remove;
std::vector<TxGraphCluster *> m_clusters_to_delete;
bool m_sort_new_clusters{true};
};
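A hypothetical sketch (not part of this diff) of how replacement evaluation might drive the TxGraphChangeSet interface above: stage the removals and the replacement, compare the old and new feerate diagrams, and only then apply. CompareDiagrams() is a stand-in for the caller's actual diagram comparison.

```cpp
#include <kernel/txgraph.h>
#include <util/feefrac.h>

#include <vector>

// Hypothetical stand-in for the caller's feerate-diagram comparison.
bool CompareDiagrams(const std::vector<FeeFrac>& old_diagram,
                     const std::vector<FeeFrac>& new_diagram);

bool TryReplacement(TxGraph& graph, const GraphLimits& limits,
                    TxEntry::TxEntryRef replacement,
                    const std::vector<TxEntry::TxEntryRef>& parents,
                    const std::vector<TxEntry::TxEntryRef>& conflicts)
{
    TxGraphChangeSet changeset(&graph, limits, conflicts);

    // Stage the replacement; this fails if a cluster size limit would be hit.
    if (!changeset.AddTx(replacement, parents)) return false;

    std::vector<FeeFrac> old_diagram, new_diagram;
    changeset.GetFeerateDiagramOld(old_diagram);
    changeset.GetFeerateDiagramNew(new_diagram);
    if (!CompareDiagrams(old_diagram, new_diagram)) return false;

    changeset.Apply();  // commit the staged additions/removals to the graph
    return true;
}
```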
class TxGraph {
public:
TxGraph() {}
// (lazily?) add a transaction to the graph (assume no in-mempool children?)
void AddTx(TxEntry *new_tx, int32_t vsize, CAmount modified_fee, const std::vector<TxEntry::TxEntryRef>& parents);
// Lazily remove a transaction from the graph
void RemoveTx(TxEntry::TxEntryRef remove_tx) EXCLUSIVE_LOCKS_REQUIRED(cs);
void RemoveBatch(std::vector<TxEntry::TxEntryRef> &txs_removed);
// add a group of parent transactions, but limit resulting cluster sizes.
void AddParentTxs(std::vector<TxEntry::TxEntryRef> parent_txs, GraphLimits limits, std::function<std::vector<TxEntry::TxEntryRef>(TxEntry::TxEntryRef)> func, std::vector<TxEntry::TxEntryRef> &txs_removed);
private:
std::pair<std::vector<TxGraphCluster*>, std::vector<TxGraphCluster*>> GetAllConnectedClusters(TxGraphCluster *target) EXCLUSIVE_LOCKS_REQUIRED(cs);
public:
// Evict the last chunk from the given cluster.
// We need to do this iteratively, so lazy updating of state would be better.
void RemoveChunkForEviction(TxGraphCluster *cluster) EXCLUSIVE_LOCKS_REQUIRED(cs);
void UpdateForPrioritisedTransaction(const TxEntry& tx);
std::vector<TxEntry::TxEntryRef> GetAncestors(const std::vector<TxEntry::TxEntryRef>& txs) const;
std::vector<TxEntry::TxEntryRef> GetDescendants(const std::vector<TxEntry::TxEntryRef>& txs) const;
// Return all transactions in the clusters that the given transactions are part of.
std::vector<TxEntry::TxEntryRef> GatherAllClusterTransactions(const std::vector<TxEntry::TxEntryRef> &txs) const;
void GetClusterSize(const std::vector<TxEntry::TxEntryRef>& parents, int64_t &cluster_size, int64_t &cluster_count) const;
TxGraphCluster* GetClusterById(int64_t id) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
auto it = m_cluster_map.find(id);
if (it != m_cluster_map.end()) return it->second.get();
return nullptr;
}
uint64_t GetClusterCount() const { LOCK(cs); return m_cluster_map.size(); }
uint64_t GetClusterCount(const TxEntry& tx) const { return tx.m_cluster->m_tx_count; }
void Check(GraphLimits limits) const; // sanity checks
void CheckMemory() const;
bool HasDescendants(const TxEntry& tx) const {
return tx.GetTxEntryChildren().size() > 0;
}
// TODO: add test coverage
bool CompareMiningScore(const TxEntry& a, const TxEntry& b) const {
if (&a == &b) return false;
FeeFrac a_frac = a.m_cluster->m_chunks[a.m_loc.first].feerate;
FeeFrac b_frac = b.m_cluster->m_chunks[b.m_loc.first].feerate;
if (a_frac != b_frac) {
return a_frac > b_frac;
} else if (a.m_cluster != b.m_cluster) {
// Equal scores in different clusters; sort by cluster id.
return a.m_cluster->m_id < b.m_cluster->m_id;
//return a->GetTx().GetHash() < b->GetTx().GetHash();
} else if (a.m_loc.first != b.m_loc.first) {
// Equal scores in same cluster; sort by chunk index.
return a.m_loc.first < b.m_loc.first;
} else {
// Equal scores in same cluster and chunk; sort by position in chunk.
for (auto it = a.m_cluster->m_chunks[a.m_loc.first].txs.begin();
it != a.m_cluster->m_chunks[a.m_loc.first].txs.end(); ++it) {
if (&(it->get()) == &a) return true;
if (&(it->get()) == &b) return false;
}
}
Assume(false); // this should not be reachable.
return true;
}
private:
struct worst_chunk {};
struct best_chunk {};
struct id {};
class CompareTxGraphClusterByWorstChunk {
public:
bool operator()(const TxGraphCluster& a, const TxGraphCluster& b) const
{
return operator()(&a, &b);
}
bool operator()(const TxGraphCluster* a, const TxGraphCluster* b) const
{
return a->m_chunks.back().feerate < b->m_chunks.back().feerate;
}
};
class CompareTxGraphClusterByBestChunk {
public:
bool operator()(const TxGraphCluster& a, const TxGraphCluster& b) const
{
return operator()(&a, &b);
}
bool operator()(const TxGraphCluster* a, const TxGraphCluster* b) const
{
return a->m_chunks.back().feerate > b->m_chunks.back().feerate;
}
};
typedef boost::multi_index_container<
TxGraphCluster*,
boost::multi_index::indexed_by<
// sorted by lowest chunk feerate
boost::multi_index::ordered_non_unique<
boost::multi_index::tag<worst_chunk>,
boost::multi_index::identity<TxGraphCluster>,
CompareTxGraphClusterByWorstChunk
>,
// sorted by highest chunk feerate
boost::multi_index::ordered_non_unique<
boost::multi_index::tag<best_chunk>,
boost::multi_index::identity<TxGraphCluster>,
CompareTxGraphClusterByBestChunk
>,
boost::multi_index::ordered_unique<
boost::multi_index::tag<id>,
boost::multi_index::member<TxGraphCluster, const int64_t, &TxGraphCluster::m_id>
>
>
> indexed_cluster_set;
indexed_cluster_set m_cluster_index;
void EraseCluster(TxGraphCluster* c);
void UpdateClusterIndex(TxGraphCluster* c);
// Create a new (empty) cluster in the cluster map, and return a pointer to it.
TxGraphCluster* AssignTxGraphCluster() EXCLUSIVE_LOCKS_REQUIRED(cs);
bool visited(const TxEntry& entry) const EXCLUSIVE_LOCKS_REQUIRED(cs, m_epoch)
{
return m_epoch.visited(entry.m_epoch_marker);
}
bool visited(TxGraphCluster *cluster) const EXCLUSIVE_LOCKS_REQUIRED(cs, m_epoch)
{
return m_epoch.visited(cluster->m_epoch_marker);
}
void RecalculateTxGraphClusterAndMaybeSort(TxGraphCluster *cluster, bool sort) EXCLUSIVE_LOCKS_REQUIRED(cs);
void TopoSort(std::vector<TxEntry::TxEntryRef>& to_be_sorted) const;
void UpdateParent(TxEntry::TxEntryRef entry, TxEntry::TxEntryRef parent, bool add) EXCLUSIVE_LOCKS_REQUIRED(cs);
void UpdateChild(TxEntry::TxEntryRef entry, TxEntry::TxEntryRef child, bool add) EXCLUSIVE_LOCKS_REQUIRED(cs);
public:
uint64_t GetInnerUsage() const { LOCK(cs); return cachedInnerUsage; }
mutable RecursiveMutex cs; // TODO: figure out how this could be private? used by rpc code, bleh
const std::unordered_map<int64_t, std::unique_ptr<TxGraphCluster>>& GetClusterMap() const EXCLUSIVE_LOCKS_REQUIRED(cs) { return m_cluster_map; }
private:
// TxGraphClusters
std::unordered_map<int64_t, std::unique_ptr<TxGraphCluster>> m_cluster_map GUARDED_BY(cs);
int64_t m_next_cluster_id GUARDED_BY(cs){0};
mutable Epoch m_epoch GUARDED_BY(cs){};
uint64_t cachedInnerUsage GUARDED_BY(cs){0};
friend class Trimmer;
friend class TxGraphCluster;
friend class TxSelector;
friend class TxGraphChangeSet;
};
#endif // BITCOIN_KERNEL_TXGRAPH_H

src/net_processing.cpp

@ -5527,7 +5527,7 @@ public:
{
/* As std::make_heap produces a max-heap, we want the entries with the
* fewest ancestors/highest fee to sort later. */
return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
return mp->CompareMiningScoreWithTopology(*b, *a, m_wtxid_relay);
}
};
} // namespace

src/node/interfaces.cpp

@ -654,10 +654,7 @@ public:
bool hasDescendantsInMempool(const uint256& txid) override
{
if (!m_node.mempool) return false;
LOCK(m_node.mempool->cs);
const auto entry{m_node.mempool->GetEntry(Txid::FromUint256(txid))};
if (entry == nullptr) return false;
return entry->GetCountWithDescendants() > 1;
return m_node.mempool->HasDescendants(Txid::FromUint256(txid));
}
bool broadcastTransaction(const CTransactionRef& tx,
const CAmount& max_tx_fee,
@ -670,11 +667,11 @@ public:
// that Chain clients do not need to know about.
return TransactionError::OK == err;
}
void getTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants, size_t* ancestorsize, CAmount* ancestorfees) override
void getTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& clustersize, size_t* ancestorsize, CAmount* ancestorfees) override
{
ancestors = descendants = 0;
ancestors = clustersize = 0;
if (!m_node.mempool) return;
m_node.mempool->GetTransactionAncestry(txid, ancestors, descendants, ancestorsize, ancestorfees);
m_node.mempool->GetTransactionAncestry(txid, ancestors, clustersize, ancestorsize, ancestorfees);
}
std::map<COutPoint, CAmount> calculateIndividualBumpFees(const std::vector<COutPoint>& outpoints, const CFeeRate& target_feerate) override

src/node/mempool_args.cpp

@ -27,6 +27,10 @@ using kernel::MemPoolOptions;
namespace {
void ApplyArgsManOptions(const ArgsManager& argsman, MemPoolLimits& mempool_limits)
{
mempool_limits.cluster_count = argsman.GetIntArg("-limitclustercount", mempool_limits.cluster_count);
if (auto vkb = argsman.GetIntArg("-limitclustersize")) mempool_limits.cluster_size_vbytes = *vkb * 1'000;
mempool_limits.ancestor_count = argsman.GetIntArg("-limitancestorcount", mempool_limits.ancestor_count);
if (auto vkb = argsman.GetIntArg("-limitancestorsize")) mempool_limits.ancestor_size_vbytes = *vkb * 1'000;

src/node/miner.cpp

@ -92,8 +92,6 @@ BlockAssembler::BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool
void BlockAssembler::resetBlock()
{
inBlock.clear();
// Reserve space for coinbase tx
nBlockWeight = 4000;
nBlockSigOpsCost = 400;
@ -136,11 +134,9 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
pblock->nTime = TicksSinceEpoch<std::chrono::seconds>(NodeClock::now());
m_lock_time_cutoff = pindexPrev->GetMedianTimePast();
int nPackagesSelected = 0;
int nDescendantsUpdated = 0;
if (m_mempool) {
LOCK(m_mempool->cs);
addPackageTxs(*m_mempool, nPackagesSelected, nDescendantsUpdated);
addChunks(*m_mempool);
}
const auto time_1{SteadyClock::now()};
@ -176,26 +172,14 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
}
const auto time_2{SteadyClock::now()};
LogPrint(BCLog::BENCH, "CreateNewBlock() packages: %.2fms (%d packages, %d updated descendants), validity: %.2fms (total %.2fms)\n",
Ticks<MillisecondsDouble>(time_1 - time_start), nPackagesSelected, nDescendantsUpdated,
LogPrint(BCLog::BENCH, "CreateNewBlock() chunks: %.2fms, validity: %.2fms (total %.2fms)\n",
Ticks<MillisecondsDouble>(time_1 - time_start),
Ticks<MillisecondsDouble>(time_2 - time_1),
Ticks<MillisecondsDouble>(time_2 - time_start));
return std::move(pblocktemplate);
}
void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
{
for (CTxMemPool::setEntries::iterator iit = testSet.begin(); iit != testSet.end(); ) {
// Only test txs not already in the block
if (inBlock.count((*iit)->GetSharedTx()->GetHash())) {
testSet.erase(iit++);
} else {
iit++;
}
}
}
bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost) const
{
// TODO: switch to weight-based accounting for packages instead of vsize-based accounting.
@ -210,222 +194,92 @@ bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost
// Perform transaction-level checks before adding to block:
// - transaction finality (locktime)
bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package) const
bool BlockAssembler::TestPackageTransactions(const std::vector<const CTxMemPoolEntry *>& txs) const
{
for (CTxMemPool::txiter it : package) {
if (!IsFinalTx(it->GetTx(), nHeight, m_lock_time_cutoff)) {
for (auto tx : txs) {
if (!IsFinalTx(tx->GetTx(), nHeight, m_lock_time_cutoff)) {
return false;
}
}
return true;
}
void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
void BlockAssembler::AddToBlock(const CTxMemPoolEntry& entry)
{
pblocktemplate->block.vtx.emplace_back(iter->GetSharedTx());
pblocktemplate->vTxFees.push_back(iter->GetFee());
pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost());
nBlockWeight += iter->GetTxWeight();
pblocktemplate->block.vtx.emplace_back(entry.GetSharedTx());
pblocktemplate->vTxFees.push_back(entry.GetFee());
pblocktemplate->vTxSigOpsCost.push_back(entry.GetSigOpCost());
nBlockWeight += entry.GetTxWeight();
++nBlockTx;
nBlockSigOpsCost += iter->GetSigOpCost();
nFees += iter->GetFee();
inBlock.insert(iter->GetSharedTx()->GetHash());
nBlockSigOpsCost += entry.GetSigOpCost();
nFees += entry.GetFee();
bool fPrintPriority = gArgs.GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY);
if (fPrintPriority) {
LogPrintf("fee rate %s txid %s\n",
CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
iter->GetTx().GetHash().ToString());
CFeeRate(entry.GetModifiedFee(), entry.GetTxSize()).ToString(),
entry.GetTx().GetHash().ToString());
}
}
/** Add descendants of given transactions to mapModifiedTx with ancestor
* state updated assuming given transactions are inBlock. Returns number
* of updated descendants. */
static int UpdatePackagesForAdded(const CTxMemPool& mempool,
const CTxMemPool::setEntries& alreadyAdded,
indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs)
void BlockAssembler::addChunks(const CTxMemPool& mempool)
{
AssertLockHeld(mempool.cs);
int nDescendantsUpdated = 0;
for (CTxMemPool::txiter it : alreadyAdded) {
CTxMemPool::setEntries descendants;
mempool.CalculateDescendants(it, descendants);
// Insert all descendants (not yet in block) into the modified set
for (CTxMemPool::txiter desc : descendants) {
if (alreadyAdded.count(desc)) {
continue;
}
++nDescendantsUpdated;
modtxiter mit = mapModifiedTx.find(desc);
if (mit == mapModifiedTx.end()) {
CTxMemPoolModifiedEntry modEntry(desc);
mit = mapModifiedTx.insert(modEntry).first;
}
mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
}
}
return nDescendantsUpdated;
}
void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries)
{
// Sort package by ancestor count
// If a transaction A depends on transaction B, then A's ancestor count
// must be greater than B's. So this is sufficient to validly order the
// transactions for block inclusion.
sortedEntries.clear();
sortedEntries.insert(sortedEntries.begin(), package.begin(), package.end());
std::sort(sortedEntries.begin(), sortedEntries.end(), CompareTxIterByAncestorCount());
}
// This transaction selection algorithm orders the mempool based
// on feerate of a transaction including all unconfirmed ancestors.
// Since we don't remove transactions from the mempool as we select them
// for block inclusion, we need an alternate method of updating the feerate
// of a transaction with its not-yet-selected ancestors as we go.
// This is accomplished by walking the in-mempool descendants of selected
// transactions and storing a temporary modified state in mapModifiedTxs.
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated)
{
AssertLockHeld(mempool.cs);
// mapModifiedTx will store sorted packages after they are modified
// because some of their txs are already in the block
indexed_modified_transaction_set mapModifiedTx;
// Keep track of entries that failed inclusion, to avoid duplicate work
std::set<Txid> failedTx;
CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
CTxMemPool::txiter iter;
// Limit the number of attempts to add transactions to the block when it is
// close to full; this is just a simple heuristic to finish quickly if the
// mempool has a lot of entries.
const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
int64_t nConsecutiveFailed = 0;
while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) {
// First try to find a new transaction in mapTx to evaluate.
//
// Skip entries in mapTx that are already in a block or are present
// in mapModifiedTx (which implies that the mapTx ancestor state is
// stale due to ancestor inclusion in the block)
// Also skip transactions that we've already failed to add. This can happen if
// we consider a transaction in mapModifiedTx and it fails: we can then
// potentially consider it again while walking mapTx. It's currently
// guaranteed to fail again, but as a belt-and-suspenders check we put it in
// failedTx and avoid re-evaluation, since the re-evaluation would be using
// cached size/sigops/fee values that are not actually correct.
/** Return true if given transaction from mapTx has already been evaluated,
* or if the transaction's cached data in mapTx is incorrect. */
if (mi != mempool.mapTx.get<ancestor_score>().end()) {
auto it = mempool.mapTx.project<0>(mi);
assert(it != mempool.mapTx.end());
if (mapModifiedTx.count(it) || inBlock.count(it->GetSharedTx()->GetHash()) || failedTx.count(it->GetSharedTx()->GetHash())) {
++mi;
continue;
}
}
// TODO: wrap this in some kind of mempool call, so that the TxGraph class
// is not exposed? Maybe the results can already be cast to CTxMemPoolEntry
// as well.
TxSelector txselector(&mempool.txgraph);
std::vector<TxEntry::TxEntryRef> selected_transactions;
FeeFrac chunk_feerate;
// Now that mi is not stale, determine which transaction to evaluate:
// the next entry from mapTx, or the best from mapModifiedTx?
bool fUsingModified = false;
chunk_feerate = txselector.SelectNextChunk(selected_transactions);
modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
if (mi == mempool.mapTx.get<ancestor_score>().end()) {
// We're out of entries in mapTx; use the entry from mapModifiedTx
iter = modit->iter;
fUsingModified = true;
} else {
// Try to compare the mapTx entry to the mapModifiedTx entry
iter = mempool.mapTx.project<0>(mi);
if (modit != mapModifiedTx.get<ancestor_score>().end() &&
CompareTxMemPoolEntryByAncestorFee()(*modit, CTxMemPoolModifiedEntry(iter))) {
// The best entry in mapModifiedTx has higher score
// than the one from mapTx.
// Switch which transaction (package) to consider
iter = modit->iter;
fUsingModified = true;
} else {
// Either no entry in mapModifiedTx, or it's worse than mapTx.
// Increment mi for the next loop iteration.
++mi;
}
}
while (selected_transactions.size() > 0) {
// We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
// contain anything that is inBlock.
assert(!inBlock.count(iter->GetSharedTx()->GetHash()));
uint64_t packageSize = iter->GetSizeWithAncestors();
CAmount packageFees = iter->GetModFeesWithAncestors();
int64_t packageSigOpsCost = iter->GetSigOpCostWithAncestors();
if (fUsingModified) {
packageSize = modit->nSizeWithAncestors;
packageFees = modit->nModFeesWithAncestors;
packageSigOpsCost = modit->nSigOpCostWithAncestors;
}
if (packageFees < m_options.blockMinFeeRate.GetFee(packageSize)) {
// Everything else we might consider has a lower fee rate
// Check to see if min fee rate is still respected.
if (chunk_feerate.fee < m_options.blockMinFeeRate.GetFee(chunk_feerate.size)) {
// Everything else we might consider has a lower feerate
return;
}
if (!TestPackage(packageSize, packageSigOpsCost)) {
if (fUsingModified) {
// Since we always look at the best entry in mapModifiedTx,
// we must erase failed entries so that we can consider the
// next best entry on the next loop iteration
mapModifiedTx.get<ancestor_score>().erase(modit);
failedTx.insert(iter->GetSharedTx()->GetHash());
}
int64_t package_sig_ops = 0;
std::vector<const CTxMemPoolEntry*> mempool_txs;
for (const auto& t : selected_transactions) {
const CTxMemPoolEntry& tx = dynamic_cast<const CTxMemPoolEntry&>(t.get());
mempool_txs.push_back(&tx);
package_sig_ops += tx.GetSigOpCost();
}
// Check to see if this chunk will fit.
if (!TestPackage(chunk_feerate.size, package_sig_ops) || !TestPackageTransactions(mempool_txs)) {
// This chunk won't fit, so we let it be removed from the heap and
// we'll try the next best.
// TODO: try to break up this chunk into smaller chunks for
// consideration, or re-sort the cluster taking into account the
// remaining size limit.
++nConsecutiveFailed;
if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight >
m_options.nBlockMaxWeight - 4000) {
// Give up if we're close to full and haven't succeeded in a while
break;
}
continue;
}
} else {
txselector.Success();
auto ancestors{mempool.AssumeCalculateMemPoolAncestors(__func__, *iter, CTxMemPool::Limits::NoLimits(), /*fSearchForParents=*/false)};
onlyUnconfirmed(ancestors);
ancestors.insert(iter);
// Test if all tx's are Final
if (!TestPackageTransactions(ancestors)) {
if (fUsingModified) {
mapModifiedTx.get<ancestor_score>().erase(modit);
failedTx.insert(iter->GetSharedTx()->GetHash());
// This chunk will fit, so add it to the block.
nConsecutiveFailed = 0;
for (const auto& tx : mempool_txs) {
AddToBlock(*tx);
}
continue;
}
// This transaction will make it in; reset the failed counter.
nConsecutiveFailed = 0;
// Package can be added. Sort the entries in a valid order.
std::vector<CTxMemPool::txiter> sortedEntries;
SortForBlock(ancestors, sortedEntries);
for (size_t i = 0; i < sortedEntries.size(); ++i) {
AddToBlock(sortedEntries[i]);
// Erase from the modified set, if present
mapModifiedTx.erase(sortedEntries[i]);
}
++nPackagesSelected;
// Update transactions that depend on each of these
nDescendantsUpdated += UpdatePackagesForAdded(mempool, ancestors, mapModifiedTx);
selected_transactions.clear();
chunk_feerate = txselector.SelectNextChunk(selected_transactions);
}
}
} // namespace node

src/node/miner.h

@ -40,96 +40,6 @@ struct CBlockTemplate
std::vector<unsigned char> vchCoinbaseCommitment;
};
// Container for tracking updates to ancestor feerate as we include (parent)
// transactions in a block
struct CTxMemPoolModifiedEntry {
explicit CTxMemPoolModifiedEntry(CTxMemPool::txiter entry)
{
iter = entry;
nSizeWithAncestors = entry->GetSizeWithAncestors();
nModFeesWithAncestors = entry->GetModFeesWithAncestors();
nSigOpCostWithAncestors = entry->GetSigOpCostWithAncestors();
}
CAmount GetModifiedFee() const { return iter->GetModifiedFee(); }
uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; }
CAmount GetModFeesWithAncestors() const { return nModFeesWithAncestors; }
size_t GetTxSize() const { return iter->GetTxSize(); }
const CTransaction& GetTx() const { return iter->GetTx(); }
CTxMemPool::txiter iter;
uint64_t nSizeWithAncestors;
CAmount nModFeesWithAncestors;
int64_t nSigOpCostWithAncestors;
};
/** Comparator for CTxMemPool::txiter objects.
* It simply compares the internal memory address of the CTxMemPoolEntry object
* pointed to. This means it has no meaning, and is only useful for using them
* as key in other indexes.
*/
struct CompareCTxMemPoolIter {
bool operator()(const CTxMemPool::txiter& a, const CTxMemPool::txiter& b) const
{
return &(*a) < &(*b);
}
};
struct modifiedentry_iter {
typedef CTxMemPool::txiter result_type;
result_type operator() (const CTxMemPoolModifiedEntry &entry) const
{
return entry.iter;
}
};
// A comparator that sorts transactions based on number of ancestors.
// This is sufficient to sort an ancestor package in an order that is valid
// to appear in a block.
struct CompareTxIterByAncestorCount {
bool operator()(const CTxMemPool::txiter& a, const CTxMemPool::txiter& b) const
{
if (a->GetCountWithAncestors() != b->GetCountWithAncestors()) {
return a->GetCountWithAncestors() < b->GetCountWithAncestors();
}
return CompareIteratorByHash()(a, b);
}
};
typedef boost::multi_index_container<
CTxMemPoolModifiedEntry,
boost::multi_index::indexed_by<
boost::multi_index::ordered_unique<
modifiedentry_iter,
CompareCTxMemPoolIter
>,
// sorted by modified ancestor fee rate
boost::multi_index::ordered_non_unique<
// Reuse same tag from CTxMemPool's similar index
boost::multi_index::tag<ancestor_score>,
boost::multi_index::identity<CTxMemPoolModifiedEntry>,
CompareTxMemPoolEntryByAncestorFee
>
>
> indexed_modified_transaction_set;
typedef indexed_modified_transaction_set::nth_index<0>::type::iterator modtxiter;
typedef indexed_modified_transaction_set::index<ancestor_score>::type::iterator modtxscoreiter;
struct update_for_parent_inclusion
{
explicit update_for_parent_inclusion(CTxMemPool::txiter it) : iter(it) {}
void operator() (CTxMemPoolModifiedEntry &e)
{
e.nModFeesWithAncestors -= iter->GetModifiedFee();
e.nSizeWithAncestors -= iter->GetTxSize();
e.nSigOpCostWithAncestors -= iter->GetSigOpCost();
}
CTxMemPool::txiter iter;
};
/** Generate a new block, without valid proof-of-work */
class BlockAssembler
{
@ -142,7 +52,6 @@ private:
uint64_t nBlockTx;
uint64_t nBlockSigOpsCost;
CAmount nFees;
std::unordered_set<Txid, SaltedTxidHasher> inBlock;
// Chain context for the block
int nHeight;
@ -177,26 +86,22 @@ private:
/** Clear the block's state and prepare for assembling a new block */
void resetBlock();
/** Add a tx to the block */
void AddToBlock(CTxMemPool::txiter iter);
void AddToBlock(const CTxMemPoolEntry& entry);
// Methods for how to add transactions to a block.
/** Add transactions based on feerate including unconfirmed ancestors
* Increments nPackagesSelected / nDescendantsUpdated with corresponding
* statistics from the package selection (for logging statistics). */
void addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs);
void addChunks(const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs);
// helper functions for addPackageTxs()
/** Remove confirmed (inBlock) entries from given set */
void onlyUnconfirmed(CTxMemPool::setEntries& testSet);
/** Test if a new package would "fit" in the block */
bool TestPackage(uint64_t packageSize, int64_t packageSigOpsCost) const;
/** Perform checks on each transaction in a package:
* locktime, premature-witness, serialized size (if necessary)
* These checks should always succeed, and they're here
* only as an extra check in case of suboptimal node configuration */
bool TestPackageTransactions(const CTxMemPool::setEntries& package) const;
/** Sort the package in an order that is valid to appear in a block */
void SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries);
bool TestPackageTransactions(const std::vector<const CTxMemPoolEntry *>& txs) const;
};
int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev);

src/node/mini_miner.cpp

@ -49,8 +49,7 @@ MiniMiner::MiniMiner(const CTxMemPool& mempool, const std::vector<COutPoint>& ou
//
// Note that the descendants of a transaction include the transaction itself. Also note
// that this is only calculating bump fees. RBF fee rules should be handled separately.
CTxMemPool::setEntries descendants;
mempool.CalculateDescendants(mempool.GetIter(ptx->GetHash()).value(), descendants);
CTxMemPool::Entries descendants = mempool.CalculateDescendants({mempool.GetIter(ptx->GetHash()).value()});
for (const auto& desc_txiter : descendants) {
m_to_be_replaced.insert(desc_txiter->GetTx().GetHash());
}
@ -77,12 +76,11 @@ MiniMiner::MiniMiner(const CTxMemPool& mempool, const std::vector<COutPoint>& ou
// Add every entry to m_entries_by_txid and m_entries, except the ones that will be replaced.
for (const auto& txiter : cluster) {
if (!m_to_be_replaced.count(txiter->GetTx().GetHash())) {
auto [mapiter, success] = m_entries_by_txid.emplace(txiter->GetTx().GetHash(),
MiniMinerMempoolEntry{/*tx_in=*/txiter->GetSharedTx(),
/*vsize_self=*/txiter->GetTxSize(),
/*vsize_ancestor=*/txiter->GetSizeWithAncestors(),
/*fee_self=*/txiter->GetModifiedFee(),
/*fee_ancestor=*/txiter->GetModFeesWithAncestors()});
size_t ancestor_count{0};
size_t ancestor_size{0};
CAmount ancestor_fee{0};
mempool.CalculateAncestorData(*txiter, ancestor_count, ancestor_size, ancestor_fee);
auto [mapiter, success] = m_entries_by_txid.emplace(txiter->GetTx().GetHash(), MiniMinerMempoolEntry(txiter->GetSharedTx(), txiter->GetTxSize(), int64_t(ancestor_size), txiter->GetModifiedFee(), ancestor_fee));
m_entries.push_back(mapiter);
} else {
auto outpoints_it = m_requested_outpoints_by_txid.find(txiter->GetTx().GetHash());
@ -104,9 +102,7 @@ MiniMiner::MiniMiner(const CTxMemPool& mempool, const std::vector<COutPoint>& ou
// will not exist without its ancestor MiniMinerMempoolEntry, so these sets won't be invalidated.
std::vector<MockEntryMap::iterator> cached_descendants;
const bool remove{m_to_be_replaced.count(txid) > 0};
CTxMemPool::setEntries descendants;
mempool.CalculateDescendants(txiter, descendants);
Assume(descendants.count(txiter) > 0);
CTxMemPool::Entries descendants = mempool.CalculateDescendants({txiter});
for (const auto& desc_txiter : descendants) {
const auto txid_desc = desc_txiter->GetTx().GetHash();
const bool remove_desc{m_to_be_replaced.count(txid_desc) > 0};
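MiniMiner now asks the mempool for ancestor aggregates on demand via CalculateAncestorData rather than reading cached per-entry totals. The following is a minimal, self-contained sketch of that kind of on-demand aggregation over toy data (not the mempool API); it counts each ancestor once even when it is reachable through several parents.

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

struct ToyTx {
    int64_t vsize{0};
    int64_t fee{0};
    std::vector<std::string> parents;
};

// Walk the parent links and aggregate, including the transaction itself.
static void CalcAncestorData(const std::map<std::string, ToyTx>& pool, const std::string& txid,
                             size_t& count, int64_t& vsize, int64_t& fee)
{
    std::set<std::string> seen;
    std::vector<std::string> todo{txid};
    while (!todo.empty()) {
        const std::string cur = todo.back();
        todo.pop_back();
        if (!seen.insert(cur).second) continue; // already counted
        const ToyTx& tx = pool.at(cur);
        ++count;
        vsize += tx.vsize;
        fee += tx.fee;
        for (const auto& parent : tx.parents) todo.push_back(parent);
    }
}

int main()
{
    const std::map<std::string, ToyTx> pool{
        {"a", {100, 1'000, {}}},
        {"b", {150, 2'000, {"a"}}},
        {"c", {200, 500, {"a", "b"}}}, // diamond: "a" reachable twice, counted once
    };
    size_t count{0};
    int64_t vsize{0}, fee{0};
    CalcAncestorData(pool, "c", count, vsize, fee);
    std::cout << count << " " << vsize << " " << fee << '\n'; // 3 450 3500
    return 0;
}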

View File

@ -55,6 +55,10 @@ static constexpr unsigned int MAX_STANDARD_SCRIPTSIG_SIZE{1650};
static constexpr unsigned int DUST_RELAY_TX_FEE{3000};
/** Default for -minrelaytxfee, minimum relay fee for transactions */
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE{1000};
/** Default maximum number of transactions per cluster */
static constexpr unsigned int DEFAULT_CLUSTER_LIMIT{100};
/** Default maximum cluster size in virtual kilobytes */
static constexpr unsigned int DEFAULT_CLUSTER_SIZE_LIMIT_KVB{101};
/** Default for -limitancestorcount, max number of in-mempool ancestors */
static constexpr unsigned int DEFAULT_ANCESTOR_LIMIT{25};
/** Default for -limitancestorsize, maximum kilobytes of tx + all in-mempool ancestors */

View File

@ -39,11 +39,10 @@ RBFTransactionState IsRBFOptIn(const CTransaction& tx, const CTxMemPool& pool)
// If all the inputs have nSequence >= maxint-1, it still might be
// signaled for RBF if any unconfirmed parents have signaled.
const auto& entry{*Assert(pool.GetEntry(tx.GetHash()))};
auto ancestors{pool.AssumeCalculateMemPoolAncestors(__func__, entry, CTxMemPool::Limits::NoLimits(),
/*fSearchForParents=*/false)};
auto ancestors{pool.CalculateMemPoolAncestorsFast(entry, /*fSearchForParents=*/false)};
for (CTxMemPool::txiter it : ancestors) {
if (SignalsOptInRBF(it->GetTx())) {
for (auto tx : ancestors) {
if (SignalsOptInRBF(tx.get().GetTx())) {
return RBFTransactionState::REPLACEABLE_BIP125;
}
}
@ -63,55 +62,23 @@ std::optional<std::string> GetEntriesForConflicts(const CTransaction& tx,
{
AssertLockHeld(pool.cs);
const uint256 txid = tx.GetHash();
uint64_t nConflictingCount = 0;
for (const auto& mi : iters_conflicting) {
nConflictingCount += mi->GetCountWithDescendants();
// Rule #5: don't consider replacing more than MAX_REPLACEMENT_CANDIDATES
// entries from the mempool. This potentially overestimates the number of actual
// descendants (i.e. if multiple conflicts share a descendant, it will be counted multiple
// times), but we just want to be conservative to avoid doing too much work.
if (nConflictingCount > MAX_REPLACEMENT_CANDIDATES) {
return strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
txid.ToString(),
nConflictingCount,
MAX_REPLACEMENT_CANDIDATES);
}
// Rule #5: don't consider replacements that conflict directly with more
// than MAX_REPLACEMENT_CANDIDATES. This gives us a bound on how many
// mempool clusters might need to be re-sorted in order to process the
// replacement, preventing CPU DoS.
if (iters_conflicting.size() > MAX_REPLACEMENT_CANDIDATES) {
return strprintf("rejecting replacement %s; too many direct conflicts (%zu > %d)\n",
txid.ToString(),
iters_conflicting.size(),
MAX_REPLACEMENT_CANDIDATES);
}
// Calculate the set of all transactions that would have to be evicted.
for (CTxMemPool::txiter it : iters_conflicting) {
pool.CalculateDescendants(it, all_conflicts);
}
return std::nullopt;
}
std::optional<std::string> HasNoNewUnconfirmed(const CTransaction& tx,
const CTxMemPool& pool,
const CTxMemPool::setEntries& iters_conflicting)
{
AssertLockHeld(pool.cs);
std::set<uint256> parents_of_conflicts;
for (const auto& mi : iters_conflicting) {
for (const CTxIn& txin : mi->GetTx().vin) {
parents_of_conflicts.insert(txin.prevout.hash);
}
}
for (unsigned int j = 0; j < tx.vin.size(); j++) {
// Rule #2: We don't want to accept replacements that require low feerate junk to be
// mined first. Ideally we'd keep track of the ancestor feerates and make the decision
// based on that, but for now requiring all new inputs to be confirmed works.
//
// Note that if you relax this to make RBF a little more useful, this may break the
// CalculateMempoolAncestors RBF relaxation which subtracts the conflict count/size from the
// descendant limit.
if (!parents_of_conflicts.count(tx.vin[j].prevout.hash)) {
// Rather than check the UTXO set - potentially expensive - it's cheaper to just check
// if the new input refers to a tx that's in the mempool.
if (pool.exists(GenTxid::Txid(tx.vin[j].prevout.hash))) {
return strprintf("replacement %s adds unconfirmed input, idx %d",
tx.GetHash().ToString(), j);
}
}
CTxMemPool::Entries direct_conflicts{iters_conflicting.begin(), iters_conflicting.end()};
auto descendants = pool.CalculateDescendants(direct_conflicts);
for (auto it : descendants) {
all_conflicts.insert(it);
}
return std::nullopt;
}
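With the new interface, the set of transactions to evict is simply the union of descendants of all directly conflicting entries. Below is a self-contained sketch of that union over a toy children map; it is illustrative only and not CTxMemPool::CalculateDescendants.

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

// Collect every descendant of the given roots (roots included), visiting each tx
// only once even if it is reachable from several conflicting roots.
static std::vector<std::string> DescendantsOf(const std::map<std::string, std::vector<std::string>>& children,
                                              const std::vector<std::string>& roots)
{
    std::set<std::string> seen;
    std::vector<std::string> result;
    std::vector<std::string> todo{roots};
    while (!todo.empty()) {
        const std::string cur = todo.back();
        todo.pop_back();
        if (!seen.insert(cur).second) continue;
        result.push_back(cur);
        const auto it = children.find(cur);
        if (it == children.end()) continue;
        for (const auto& child : it->second) todo.push_back(child);
    }
    return result;
}

int main()
{
    // a -> b -> d, a -> c, and e -> d (d has two parents).
    const std::map<std::string, std::vector<std::string>> children{
        {"a", {"b", "c"}}, {"b", {"d"}}, {"e", {"d"}},
    };
    // Two direct conflicts whose descendant sets overlap at "d"; "d" appears once.
    for (const auto& txid : DescendantsOf(children, {"a", "e"})) std::cout << txid << '\n';
    return 0;
}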
@ -131,32 +98,6 @@ std::optional<std::string> EntriesAndTxidsDisjoint(const CTxMemPool::setEntries&
return std::nullopt;
}
std::optional<std::string> PaysMoreThanConflicts(const CTxMemPool::setEntries& iters_conflicting,
CFeeRate replacement_feerate,
const uint256& txid)
{
for (const auto& mi : iters_conflicting) {
// Don't allow the replacement to reduce the feerate of the mempool.
//
// We usually don't want to accept replacements with lower feerates than what they replaced
// as that would lower the feerate of the next block. Requiring that the feerate always be
// increased is also an easy-to-reason about way to prevent DoS attacks via replacements.
//
// We only consider the feerates of transactions being directly replaced, not their indirect
// descendants. While that does mean high feerate children are ignored when deciding whether
// or not to replace, we do require the replacement to pay more overall fees too, mitigating
// most cases.
CFeeRate original_feerate(mi->GetModifiedFee(), mi->GetTxSize());
if (replacement_feerate <= original_feerate) {
return strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
txid.ToString(),
replacement_feerate.ToString(),
original_feerate.ToString());
}
}
return std::nullopt;
}
std::optional<std::string> PaysForRBF(CAmount original_fees,
CAmount replacement_fees,
size_t replacement_vsize,
@ -187,11 +128,10 @@ std::optional<std::string> PaysForRBF(CAmount original_fees,
std::optional<std::pair<DiagramCheckError, std::string>> ImprovesFeerateDiagram(CTxMemPool& pool,
const CTxMemPool::setEntries& direct_conflicts,
const CTxMemPool::setEntries& all_conflicts,
CAmount replacement_fees,
int64_t replacement_vsize)
std::vector<std::pair<CTxMemPoolEntry*, CAmount>> new_entries)
{
// Require that the replacement strictly improves the mempool's feerate diagram.
const auto chunk_results{pool.CalculateChunksForRBF(replacement_fees, replacement_vsize, direct_conflicts, all_conflicts)};
const auto chunk_results{pool.CalculateChunksForRBF(new_entries, direct_conflicts, all_conflicts)};
if (!chunk_results.has_value()) {
return std::make_pair(DiagramCheckError::UNCALCULABLE, util::ErrorString(chunk_results).original);
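ImprovesFeerateDiagram now hands the proposed entries to CalculateChunksForRBF and accepts the replacement only if the resulting cumulative (size, fee) diagram is at least as high as the old one everywhere and strictly higher somewhere. The sketch below illustrates that comparison on toy diagrams, assuming both are monotone cumulative point lists with strictly increasing positive sizes that cover the same total size; it is a conceptual illustration, not the chunk-comparison code in this commit.

#include <cstdint>
#include <iostream>
#include <vector>

struct Point { int64_t size; int64_t fee; }; // cumulative vsize and cumulative fee

// Evaluate a piecewise-linear cumulative diagram at vsize s.
// The diagram implicitly starts at (0, 0) and is sorted by size.
static double Eval(const std::vector<Point>& d, int64_t s)
{
    Point prev{0, 0};
    for (const Point& p : d) {
        if (s <= p.size) {
            const double frac = double(s - prev.size) / double(p.size - prev.size);
            return prev.fee + frac * (p.fee - prev.fee);
        }
        prev = p;
    }
    return double(prev.fee); // s is at the end of the diagram
}

// True if new_d >= old_d at every breakpoint of either diagram and strictly
// greater somewhere. Checking the breakpoints suffices for piecewise-linear curves.
static bool Improves(const std::vector<Point>& old_d, const std::vector<Point>& new_d)
{
    std::vector<int64_t> xs;
    for (const Point& p : old_d) xs.push_back(p.size);
    for (const Point& p : new_d) xs.push_back(p.size);
    bool strictly_better{false};
    for (const int64_t x : xs) {
        const double o = Eval(old_d, x);
        const double n = Eval(new_d, x);
        if (n < o) return false;
        if (n > o) strictly_better = true;
    }
    return strictly_better;
}

int main()
{
    const std::vector<Point> old_d{{100, 1'000}, {300, 1'500}};
    const std::vector<Point> new_d{{150, 2'000}, {300, 2'100}};
    std::cout << (Improves(old_d, new_d) ? "improves" : "does not improve") << '\n';
    return 0;
}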

View File

@ -71,14 +71,6 @@ std::optional<std::string> GetEntriesForConflicts(const CTransaction& tx, CTxMem
CTxMemPool::setEntries& all_conflicts)
EXCLUSIVE_LOCKS_REQUIRED(pool.cs);
/** The replacement transaction may only include an unconfirmed input if that input was included in
* one of the original transactions.
* @returns error message if tx spends unconfirmed inputs not also spent by iters_conflicting,
* otherwise std::nullopt. */
std::optional<std::string> HasNoNewUnconfirmed(const CTransaction& tx, const CTxMemPool& pool,
const CTxMemPool::setEntries& iters_conflicting)
EXCLUSIVE_LOCKS_REQUIRED(pool.cs);
/** Check the intersection between two sets of transactions (a set of mempool entries and a set of
* txids) to make sure they are disjoint.
* @param[in] ancestors Set of mempool entries corresponding to ancestors of the
@ -92,14 +84,6 @@ std::optional<std::string> EntriesAndTxidsDisjoint(const CTxMemPool::setEntries&
const std::set<Txid>& direct_conflicts,
const uint256& txid);
/** Check that the feerate of the replacement transaction(s) is higher than the feerate of each
* of the transactions in iters_conflicting.
* @param[in] iters_conflicting The set of mempool entries.
* @returns error message if fees insufficient, otherwise std::nullopt.
*/
std::optional<std::string> PaysMoreThanConflicts(const CTxMemPool::setEntries& iters_conflicting,
CFeeRate replacement_feerate, const uint256& txid);
/** The replacement transaction must pay more fees than the original transactions. The additional
* fees must pay for the replacement's bandwidth at or above the incremental relay feerate.
* @param[in] original_fees Total modified fees of original transaction(s).
@ -121,15 +105,13 @@ std::optional<std::string> PaysForRBF(CAmount original_fees,
* @param[in] direct_conflicts Set of in-mempool txids corresponding to the direct conflicts i.e.
* input double-spends with the proposed transaction
* @param[in] all_conflicts Set of mempool entries corresponding to all transactions to be evicted
* @param[in] replacement_fees Fees of proposed replacement package
* @param[in] replacement_vsize Size of proposed replacement package
* @param[in] new_entries vector of new txs along with their modified fees
* @returns error type and string if mempool diagram doesn't improve, otherwise std::nullopt.
*/
std::optional<std::pair<DiagramCheckError, std::string>> ImprovesFeerateDiagram(CTxMemPool& pool,
const CTxMemPool::setEntries& direct_conflicts,
const CTxMemPool::setEntries& all_conflicts,
CAmount replacement_fees,
int64_t replacement_vsize)
EXCLUSIVE_LOCKS_REQUIRED(pool.cs);
std::vector<std::pair<CTxMemPoolEntry*, CAmount>> new_entries)
EXCLUSIVE_LOCKS_REQUIRED(pool.cs);
#endif // BITCOIN_POLICY_RBF_H

View File

@ -55,9 +55,9 @@ struct ParentInfo {
{}
};
std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t vsize,
std::optional<std::string> PackageV3Checks(const CTxMemPool& pool, const CTransactionRef& ptx, int64_t vsize,
const Package& package,
const CTxMemPool::setEntries& mempool_ancestors)
const CTxMemPool::Entries& mempool_parents)
{
// This function is specialized for these limits, and must be reimplemented if they ever change.
static_assert(V3_ANCESTOR_LIMIT == 2);
@ -67,12 +67,12 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v
// Now we have all ancestors, so we can start checking v3 rules.
if (ptx->nVersion == 3) {
if (mempool_ancestors.size() + in_package_parents.size() + 1 > V3_ANCESTOR_LIMIT) {
if (mempool_parents.size() + in_package_parents.size() + 1 > V3_ANCESTOR_LIMIT) {
return strprintf("tx %s (wtxid=%s) would have too many ancestors",
ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString());
}
const bool has_parent{mempool_ancestors.size() + in_package_parents.size() > 0};
const bool has_parent{mempool_parents.size() + in_package_parents.size() > 0};
if (has_parent) {
// A v3 child cannot be too large.
if (vsize > V3_CHILD_MAX_VSIZE) {
@ -83,13 +83,13 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v
// Exactly 1 parent exists, either in mempool or package. Find it.
const auto parent_info = [&] {
if (mempool_ancestors.size() > 0) {
auto& mempool_parent = *mempool_ancestors.begin();
Assume(mempool_parent->GetCountWithDescendants() == 1);
if (mempool_parents.size() > 0) {
auto& mempool_parent = *mempool_parents.begin();
Assume(pool.GetNumChildren(mempool_parent) == 0);
return ParentInfo{mempool_parent->GetTx().GetHash(),
mempool_parent->GetTx().GetWitnessHash(),
mempool_parent->GetTx().nVersion,
/*has_mempool_descendant=*/mempool_parent->GetCountWithDescendants() > 1};
/*has_mempool_descendant=*/pool.GetNumChildren(mempool_parent) > 0};
} else {
auto& parent_index = in_package_parents.front();
auto& package_parent = package.at(parent_index);
@ -139,7 +139,7 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v
}
} else {
// Non-v3 transactions cannot have v3 parents.
for (auto it : mempool_ancestors) {
for (auto it : mempool_parents) {
if (it->GetTx().nVersion == 3) {
return strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)",
ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(),
@ -159,13 +159,14 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v
return std::nullopt;
}
std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTransactionRef& ptx,
const CTxMemPool::setEntries& mempool_ancestors,
std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTxMemPool& pool, const CTransactionRef& ptx,
const std::set<Txid>& direct_conflicts,
int64_t vsize)
{
CTxMemPool::Entries parents = pool.CalculateParentsOf(*ptx);
// Check v3 and non-v3 inheritance.
for (const auto& entry : mempool_ancestors) {
for (const auto& entry : parents) {
if (ptx->nVersion != 3 && entry->GetTx().nVersion == 3) {
return std::make_pair(strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)",
ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(),
@ -187,14 +188,14 @@ std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTra
if (ptx->nVersion != 3) return std::nullopt;
// Check that V3_ANCESTOR_LIMIT would not be violated.
if (mempool_ancestors.size() + 1 > V3_ANCESTOR_LIMIT) {
if (parents.size() + 1 > V3_ANCESTOR_LIMIT) {
return std::make_pair(strprintf("tx %s (wtxid=%s) would have too many ancestors",
ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()),
nullptr);
}
// Remaining checks only pertain to transactions with unconfirmed ancestors.
if (mempool_ancestors.size() > 0) {
if (parents.size() > 0) {
// If this transaction spends V3 parents, it cannot be too large.
if (vsize > V3_CHILD_MAX_VSIZE) {
return std::make_pair(strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes",
@ -203,25 +204,32 @@ std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTra
}
// Check the descendant counts of in-mempool ancestors.
const auto& parent_entry = *mempool_ancestors.begin();
const auto& parent_entry = parents[0];
// If we have a single parent, that transaction may not have any of its own parents.
if (pool.GetParents(*parent_entry).size() > 0) {
return std::make_pair(strprintf("tx %s (wtxid=%s) would have too many ancestors",
ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()), nullptr);
}
// If there are any ancestors, this is the only child allowed. The parent cannot have any
// other descendants. We handle the possibility of multiple children as that case is
// possible through a reorg.
const auto& children = parent_entry->GetMemPoolChildrenConst();
const auto& children = pool.GetChildren(*parent_entry);
// Don't double-count a transaction that is going to be replaced. This logic assumes that
// any descendant of the V3 transaction is a direct child, which makes sense because a V3
// transaction can only have 1 descendant.
const bool child_will_be_replaced = !children.empty() &&
std::any_of(children.cbegin(), children.cend(),
[&direct_conflicts](const CTxMemPoolEntry& child){return direct_conflicts.count(child.GetTx().GetHash()) > 0;});
if (parent_entry->GetCountWithDescendants() + 1 > V3_DESCENDANT_LIMIT && !child_will_be_replaced) {
if (pool.GetNumChildren(parent_entry) + 2 > V3_DESCENDANT_LIMIT && !child_will_be_replaced) {
// Allow sibling eviction for v3 transaction: if another child already exists, even if
// we don't conflict inputs with it, consider evicting it under RBF rules. We rely on v3 rules
// only permitting 1 descendant, as otherwise we would need to have logic for deciding
// which descendant to evict. Skip if this isn't true, e.g. if the transaction has
// multiple children or the sibling also has descendants due to a reorg.
const bool consider_sibling_eviction{parent_entry->GetCountWithDescendants() == 2 &&
children.begin()->get().GetCountWithAncestors() == 2};
const bool consider_sibling_eviction{pool.GetNumChildren(parent_entry) == 1 &&
pool.GetNumChildren(children.begin()->get()) == 0};
// Return the sibling if its eviction can be considered. Provide the "descendant count
// limit" string either way, as the caller may decide not to do sibling eviction.
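For intuition, the sibling-eviction decision above boils down to two predicates: the v3 descendant limit would be exceeded, and the parent's topology is the simple one v3 guarantees (one existing child, no grandchildren). Here is a hypothetical, self-contained sketch of that decision; the constant is copied from this commit's v3 policy, everything else is invented toy input rather than mempool entries.

#include <iostream>

// Toy view of the parent's in-mempool topology as seen when a new v3 child arrives.
struct ParentTopology {
    int num_children{0};                       // existing children of the parent
    int sibling_num_children{0};               // children of the (single) existing sibling, if any
    bool sibling_conflicts_with_new_tx{false}; // sibling is already being replaced via RBF
};

static constexpr int V3_DESCENDANT_LIMIT{2};

// Returns true if the new child can only be accepted by evicting the sibling.
static bool WouldNeedSiblingEviction(const ParentTopology& p)
{
    // Adding the new child gives the parent (num_children + 2) descendants including itself.
    const bool limit_hit = p.num_children + 2 > V3_DESCENDANT_LIMIT && !p.sibling_conflicts_with_new_tx;
    // Only consider eviction in the simple case: exactly one sibling, no grandchildren.
    const bool simple_topology = p.num_children == 1 && p.sibling_num_children == 0;
    return limit_hit && simple_topology;
}

int main()
{
    std::cout << WouldNeedSiblingEviction({1, 0, false}) << '\n'; // 1: consider evicting the sibling
    std::cout << WouldNeedSiblingEviction({1, 0, true}) << '\n';  // 0: sibling is already being replaced
    std::cout << WouldNeedSiblingEviction({0, 0, false}) << '\n'; // 0: no sibling, limit not hit
    return 0;
}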

View File

@ -42,7 +42,7 @@ static_assert(V3_CHILD_MAX_VSIZE + MAX_STANDARD_TX_WEIGHT / WITNESS_SCALE_FACTOR
* V3_CHILD_MAX_VSIZE.
*
*
* @param[in] mempool_ancestors The in-mempool ancestors of ptx.
* @param[in] pool A reference to the mempool.
* @param[in] direct_conflicts In-mempool transactions this tx conflicts with. These conflicts
* are used to more accurately calculate the resulting descendant
* count of in-mempool ancestors.
@ -56,8 +56,7 @@ static_assert(V3_CHILD_MAX_VSIZE + MAX_STANDARD_TX_WEIGHT / WITNESS_SCALE_FACTOR
* - debug string + nullptr if this transaction violates some v3 rule and sibling eviction is not
* applicable.
*/
std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTransactionRef& ptx,
const CTxMemPool::setEntries& mempool_ancestors,
std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTxMemPool& pool, const CTransactionRef& ptx,
const std::set<Txid>& direct_conflicts,
int64_t vsize);
@ -82,8 +81,8 @@ std::optional<std::pair<std::string, CTransactionRef>> SingleV3Checks(const CTra
*
* @returns debug string if an error occurs, std::nullopt otherwise.
* */
std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t vsize,
std::optional<std::string> PackageV3Checks(const CTxMemPool& pool, const CTransactionRef& ptx, int64_t vsize,
const Package& package,
const CTxMemPool::setEntries& mempool_ancestors);
const CTxMemPool::Entries& mempool_parents);
#endif // BITCOIN_POLICY_V3_POLICY_H

View File

@ -66,6 +66,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "getbalance", 3, "avoid_reuse" },
{ "getblockfrompeer", 1, "peer_id" },
{ "getblockhash", 0, "height" },
{ "getmempoolcluster", 0, "id" },
{ "waitforblockheight", 0, "height" },
{ "waitforblockheight", 1, "timeout" },
{ "waitforblock", 1, "timeout" },

View File

@ -248,53 +248,109 @@ static RPCHelpMan testmempoolaccept()
};
}
static std::vector<RPCResult> ClusterDescription()
{
return {
RPCResult{RPCResult::Type::NUM, "vsize", "total virtual size of the cluster's transactions as defined in BIP 141. This is different from actual serialized size for witness transactions as witness data is discounted."},
RPCResult{RPCResult::Type::NUM, "txcount", "number of transactions in the cluster"},
RPCResult{RPCResult::Type::NUM, "clusterid", "id of this cluster"},
RPCResult{RPCResult::Type::ARR, "chunks", "the cluster's chunks",
{RPCResult{RPCResult::Type::OBJ, "chunkentry", "",
{
RPCResult{RPCResult::Type::STR_AMOUNT, "fee", "fee of this chunk"},
RPCResult{RPCResult::Type::NUM, "vsize", "virtual size of this chunk"},
RPCResult{RPCResult::Type::NUM, "feerate", "fee rate in " + CURRENCY_UNIT + "/kvB"},
RPCResult{RPCResult::Type::ARR, "txids", "txids in this chunk in sorted order",
{RPCResult{RPCResult::Type::STR_HEX, "transactionid", "the transaction id"}}}
}}
}
}
};
}
static std::vector<RPCResult> MempoolEntryDescription()
{
return {
RPCResult{RPCResult::Type::NUM, "vsize", "virtual transaction size as defined in BIP 141. This is different from actual serialized size for witness transactions as witness data is discounted."},
RPCResult{RPCResult::Type::NUM, "weight", "transaction weight as defined in BIP 141."},
RPCResult{RPCResult::Type::NUM_TIME, "time", "local time transaction entered pool in seconds since 1 Jan 1970 GMT"},
RPCResult{RPCResult::Type::NUM, "height", "block height when transaction entered pool"},
RPCResult{RPCResult::Type::NUM, "descendantcount", "number of in-mempool descendant transactions (including this one)"},
RPCResult{RPCResult::Type::NUM, "descendantsize", "virtual transaction size of in-mempool descendants (including this one)"},
RPCResult{RPCResult::Type::NUM, "ancestorcount", "number of in-mempool ancestor transactions (including this one)"},
RPCResult{RPCResult::Type::NUM, "ancestorsize", "virtual transaction size of in-mempool ancestors (including this one)"},
RPCResult{RPCResult::Type::STR_HEX, "wtxid", "hash of serialized transaction, including witness data"},
RPCResult{RPCResult::Type::OBJ, "fees", "",
{
RPCResult{RPCResult::Type::STR_AMOUNT, "base", "transaction fee, denominated in " + CURRENCY_UNIT},
RPCResult{RPCResult::Type::STR_AMOUNT, "modified", "transaction fee with fee deltas used for mining priority, denominated in " + CURRENCY_UNIT},
RPCResult{RPCResult::Type::STR_AMOUNT, "ancestor", "transaction fees of in-mempool ancestors (including this one) with fee deltas used for mining priority, denominated in " + CURRENCY_UNIT},
RPCResult{RPCResult::Type::STR_AMOUNT, "descendant", "transaction fees of in-mempool descendants (including this one) with fee deltas used for mining priority, denominated in " + CURRENCY_UNIT},
}},
RPCResult{RPCResult::Type::ARR, "depends", "unconfirmed transactions used as inputs for this transaction",
{RPCResult{RPCResult::Type::STR_HEX, "transactionid", "parent transaction id"}}},
RPCResult{RPCResult::Type::ARR, "spentby", "unconfirmed transactions spending outputs from this transaction",
{RPCResult{RPCResult::Type::STR_HEX, "transactionid", "child transaction id"}}},
RPCResult{RPCResult::Type::BOOL, "bip125-replaceable", "Whether this transaction signals BIP125 replaceability or has an unconfirmed ancestor signaling BIP125 replaceability.\n"},
RPCResult{RPCResult::Type::BOOL, "unbroadcast", "Whether this transaction is currently unbroadcast (initial broadcast not yet acknowledged by any peers)"},
RPCResult{RPCResult::Type::NUM, "weight", "transaction weight as defined in BIP 141."},
RPCResult{RPCResult::Type::NUM_TIME, "time", "local time transaction entered pool in seconds since 1 Jan 1970 GMT"},
RPCResult{RPCResult::Type::NUM, "height", "block height when transaction entered pool"},
RPCResult{RPCResult::Type::NUM, "descendantcount", "number of in-mempool descendant transactions (including this one)"},
RPCResult{RPCResult::Type::NUM, "descendantsize", "virtual transaction size of in-mempool descendants (including this one)"},
RPCResult{RPCResult::Type::NUM, "ancestorcount", "number of in-mempool ancestor transactions (including this one)"},
RPCResult{RPCResult::Type::NUM, "ancestorsize", "virtual transaction size of in-mempool ancestors (including this one)"},
RPCResult{RPCResult::Type::NUM, "chunksize", "virtual transaction size of this transaction's chunk"},
RPCResult{RPCResult::Type::NUM, "clusterid", "id of the cluster containing this tx"},
RPCResult{RPCResult::Type::STR_HEX, "wtxid", "hash of serialized transaction, including witness data"},
RPCResult{RPCResult::Type::OBJ, "fees", "",
{
RPCResult{RPCResult::Type::STR_AMOUNT, "base", "transaction fee, denominated in " + CURRENCY_UNIT},
RPCResult{RPCResult::Type::STR_AMOUNT, "modified", "transaction fee with fee deltas used for mining priority, denominated in " + CURRENCY_UNIT},
RPCResult{RPCResult::Type::STR_AMOUNT, "ancestor", "transaction fees of in-mempool ancestors (including this one) with fee deltas used for mining priority, denominated in " + CURRENCY_UNIT},
RPCResult{RPCResult::Type::STR_AMOUNT, "descendant", "transaction fees of in-mempool descendants (including this one) with fee deltas used for mining priority, denominated in " + CURRENCY_UNIT},
RPCResult{RPCResult::Type::STR_AMOUNT, "chunk", "transaction fees of chunk, denominated in " + CURRENCY_UNIT},
}},
RPCResult{RPCResult::Type::ARR, "depends", "unconfirmed transactions used as inputs for this transaction",
{RPCResult{RPCResult::Type::STR_HEX, "transactionid", "parent transaction id"}}},
RPCResult{RPCResult::Type::ARR, "spentby", "unconfirmed transactions spending outputs from this transaction",
{RPCResult{RPCResult::Type::STR_HEX, "transactionid", "child transaction id"}}},
RPCResult{RPCResult::Type::BOOL, "bip125-replaceable", "Whether this transaction signals BIP125 replaceability or has an unconfirmed ancestor signaling BIP125 replaceability.\n"},
RPCResult{RPCResult::Type::BOOL, "unbroadcast", "Whether this transaction is currently unbroadcast (initial broadcast not yet acknowledged by any peers)"},
};
}
static void clusterToJSON(const CTxMemPool& pool, UniValue& info, const TxGraphCluster& c) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
{
AssertLockHeld(pool.cs);
info.pushKV("vsize", (int)c.m_tx_size);
info.pushKV("txcount", (int)c.m_tx_count);
info.pushKV("clusterid", (int)c.m_id);
UniValue chunks(UniValue::VARR);
for (auto &chunk : c.m_chunks) {
UniValue chunkdata(UniValue::VOBJ);
chunkdata.pushKV("vsize", (int)chunk.feerate.size);
chunkdata.pushKV("fee", ValueFromAmount(chunk.feerate.fee));
chunkdata.pushKV("feerate", ValueFromAmount(CFeeRate(chunk.feerate.fee, chunk.feerate.size).GetFeePerK()));
UniValue txids(UniValue::VARR);
for (auto &tx : chunk.txs) {
txids.push_back(static_cast<const CTxMemPoolEntry&>(tx.get()).GetTx().GetHash().ToString());
}
chunkdata.pushKV("txids", txids);
chunks.push_back(chunkdata);
}
info.pushKV("chunks", chunks);
}
static void entryToJSON(const CTxMemPool& pool, UniValue& info, const CTxMemPoolEntry& e) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
{
AssertLockHeld(pool.cs);
size_t ancestor_size{0}, descendant_size{0};
size_t ancestor_count{0}, descendant_count{0};
CAmount ancestor_fees{0}, descendant_fees{0};
pool.CalculateAncestorData(e, ancestor_count, ancestor_size, ancestor_fees);
pool.CalculateDescendantData(e, descendant_count, descendant_size, descendant_fees);
info.pushKV("vsize", (int)e.GetTxSize());
info.pushKV("weight", (int)e.GetTxWeight());
info.pushKV("time", count_seconds(e.GetTime()));
info.pushKV("height", (int)e.GetHeight());
info.pushKV("descendantcount", e.GetCountWithDescendants());
info.pushKV("descendantsize", e.GetSizeWithDescendants());
info.pushKV("ancestorcount", e.GetCountWithAncestors());
info.pushKV("ancestorsize", e.GetSizeWithAncestors());
info.pushKV("descendantcount", descendant_count);
info.pushKV("descendantsize", descendant_size);
info.pushKV("ancestorcount", ancestor_count);
info.pushKV("ancestorsize", ancestor_size);
info.pushKV("wtxid", e.GetTx().GetWitnessHash().ToString());
info.pushKV("chunksize", e.m_cluster->m_chunks[e.m_loc.first].feerate.size);
info.pushKV("clusterid", e.m_cluster->m_id);
UniValue fees(UniValue::VOBJ);
fees.pushKV("base", ValueFromAmount(e.GetFee()));
fees.pushKV("modified", ValueFromAmount(e.GetModifiedFee()));
fees.pushKV("ancestor", ValueFromAmount(e.GetModFeesWithAncestors()));
fees.pushKV("descendant", ValueFromAmount(e.GetModFeesWithDescendants()));
fees.pushKV("ancestor", ValueFromAmount(ancestor_fees));
fees.pushKV("descendant", ValueFromAmount(descendant_fees));
fees.pushKV("chunk", ValueFromAmount(e.m_cluster->m_chunks[e.m_loc.first].feerate.fee));
info.pushKV("fees", fees);
const CTransaction& tx = e.GetTx();
@ -314,7 +370,7 @@ static void entryToJSON(const CTxMemPool& pool, UniValue& info, const CTxMemPool
info.pushKV("depends", depends);
UniValue spent(UniValue::VARR);
for (const CTxMemPoolEntry& child : e.GetMemPoolChildrenConst()) {
for (const CTxMemPoolEntry& child : pool.GetChildren(e)) {
spent.push_back(child.GetTx().GetHash().ToString());
}
@ -371,6 +427,49 @@ UniValue MempoolToJSON(const CTxMemPool& pool, bool verbose, bool include_mempoo
}
}
static RPCHelpMan getmempoolfeeratediagram()
{
return RPCHelpMan{"getmempoolfeeratediagram",
"Returns the feerate diagram for the whole mempool.",
{},
{
RPCResult{"mempool chunks",
RPCResult::Type::ARR, "", "",
{
{
RPCResult::Type::OBJ, "", "",
{
{RPCResult::Type::NUM, "size", "cumulative size"},
{RPCResult::Type::NUM, "fee", "cumulative fee"}
}
}
}
}
},
RPCExamples{
HelpExampleCli("getmempoolfeeratediagram", "")
+ HelpExampleRpc("getmempoolfeeratediagram", "")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
const CTxMemPool& mempool = EnsureAnyMemPool(request.context);
LOCK(mempool.cs);
UniValue result(UniValue::VARR);
auto diagram = mempool.GetFeerateDiagram();
for (auto f : diagram) {
UniValue o(UniValue::VOBJ);
o.pushKV("size", f.size);
o.pushKV("fee", ValueFromAmount(f.fee));
result.push_back(o);
}
return result;
}
};
}
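getmempoolfeeratediagram reports the mempool's chunks as cumulative (size, fee) points: chunks are taken in descending feerate order and their sizes and fees are accumulated. A short self-contained sketch with made-up chunk data rather than the node's GetFeerateDiagram():

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct ChunkFee { int64_t fee; int64_t vsize; };

int main()
{
    // Hypothetical mempool chunks (fee in satoshis, size in vbytes).
    std::vector<ChunkFee> chunks{{1'000, 500}, {9'000, 300}, {400, 400}};
    // Highest-feerate chunk first, comparing fee/vsize by cross-multiplication.
    std::sort(chunks.begin(), chunks.end(), [](const ChunkFee& a, const ChunkFee& b) {
        return a.fee * b.vsize > b.fee * a.vsize;
    });
    // Accumulate into the (size, fee) points that the RPC reports.
    int64_t size{0}, fee{0};
    for (const ChunkFee& c : chunks) {
        size += c.vsize;
        fee += c.fee;
        std::cout << "size=" << size << " fee=" << fee << '\n';
    }
    return 0;
}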
static RPCHelpMan getrawmempool()
{
return RPCHelpMan{"getrawmempool",
@ -459,21 +558,20 @@ static RPCHelpMan getmempoolancestors()
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool");
}
auto ancestors{mempool.AssumeCalculateMemPoolAncestors(self.m_name, *entry, CTxMemPool::Limits::NoLimits(), /*fSearchForParents=*/false)};
auto ancestors{mempool.CalculateMemPoolAncestorsFast(*entry, /*fSearchForParents=*/false)};
if (!fVerbose) {
UniValue o(UniValue::VARR);
for (CTxMemPool::txiter ancestorIt : ancestors) {
o.push_back(ancestorIt->GetTx().GetHash().ToString());
for (auto ancestor : ancestors) {
o.push_back(ancestor.get().GetTx().GetHash().ToString());
}
return o;
} else {
UniValue o(UniValue::VOBJ);
for (CTxMemPool::txiter ancestorIt : ancestors) {
const CTxMemPoolEntry &e = *ancestorIt;
const uint256& _hash = e.GetTx().GetHash();
for (auto ancestor : ancestors) {
const uint256& _hash = ancestor.get().GetTx().GetHash();
UniValue info(UniValue::VOBJ);
entryToJSON(mempool, info, e);
entryToJSON(mempool, info, ancestor.get());
o.pushKV(_hash.ToString(), info);
}
return o;
@ -520,21 +618,21 @@ static RPCHelpMan getmempooldescendants()
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool");
}
CTxMemPool::setEntries setDescendants;
mempool.CalculateDescendants(*it, setDescendants);
// CTxMemPool::CalculateDescendants will include the given tx
setDescendants.erase(*it);
CTxMemPool::Entries descendants = mempool.CalculateDescendants({*it});
// Note: CTxMemPool::CalculateDescendants will include the given tx
if (!fVerbose) {
UniValue o(UniValue::VARR);
for (CTxMemPool::txiter descendantIt : setDescendants) {
for (CTxMemPool::txiter descendantIt : descendants) {
if (descendantIt == it) continue;
o.push_back(descendantIt->GetTx().GetHash().ToString());
}
return o;
} else {
UniValue o(UniValue::VOBJ);
for (CTxMemPool::txiter descendantIt : setDescendants) {
for (CTxMemPool::txiter descendantIt : descendants) {
if (descendantIt == it) continue;
const CTxMemPoolEntry &e = *descendantIt;
const uint256& _hash = e.GetTx().GetHash();
UniValue info(UniValue::VOBJ);
@ -547,6 +645,40 @@ static RPCHelpMan getmempooldescendants()
};
}
static RPCHelpMan getmempoolcluster()
{
return RPCHelpMan{"getmempoolcluster",
"\nReturns mempool data for the given cluster\n",
{
{"id", RPCArg::Type::NUM, RPCArg::Optional::NO, "The cluster id (must be in mempool)"},
},
RPCResult{
RPCResult::Type::OBJ, "", "", ClusterDescription()},
RPCExamples{
HelpExampleCli("getmempoolcluster", "cluster_id")
+ HelpExampleRpc("getmempoolcluster", "cluster_id")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
int64_t cluster_id = request.params[0].getInt<int64_t>();
const CTxMemPool& mempool = EnsureAnyMemPool(request.context);
LOCK(mempool.cs);
LOCK(mempool.txgraph.cs);
auto it = mempool.txgraph.GetClusterMap().find(cluster_id);
if (it == mempool.txgraph.GetClusterMap().end()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Cluster not in mempool");
}
const TxGraphCluster &c = *(it->second);
UniValue info(UniValue::VOBJ);
clusterToJSON(mempool, info, c);
return info;
},
};
}
static RPCHelpMan getmempoolentry()
{
return RPCHelpMan{"getmempoolentry",
@ -665,6 +797,7 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool)
{
// Make sure this call is atomic in the pool.
LOCK(pool.cs);
LOCK(pool.txgraph.cs);
UniValue ret(UniValue::VOBJ);
ret.pushKV("loaded", pool.GetLoadTried());
ret.pushKV("size", (int64_t)pool.size());
@ -677,6 +810,19 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool)
ret.pushKV("incrementalrelayfee", ValueFromAmount(pool.m_incremental_relay_feerate.GetFeePerK()));
ret.pushKV("unbroadcastcount", uint64_t{pool.GetUnbroadcastTxs().size()});
ret.pushKV("fullrbf", pool.m_full_rbf);
ret.pushKV("numberofclusters", pool.txgraph.GetClusterMap().size());
int64_t max_cluster_count = 0;
int64_t max_cluster_size = 0;
for (const auto& [id, cluster] : pool.txgraph.GetClusterMap()) {
if (cluster->m_tx_count > max_cluster_count) {
max_cluster_count = cluster->m_tx_count;
}
if (cluster->m_tx_size > max_cluster_size) {
max_cluster_size = cluster->m_tx_size;
}
}
ret.pushKV("maxclustercount", max_cluster_count);
ret.pushKV("maxclustersize", max_cluster_size);
return ret;
}
@ -699,6 +845,9 @@ static RPCHelpMan getmempoolinfo()
{RPCResult::Type::NUM, "incrementalrelayfee", "minimum fee rate increment for mempool limiting or replacement in " + CURRENCY_UNIT + "/kvB"},
{RPCResult::Type::NUM, "unbroadcastcount", "Current number of transactions that haven't passed initial broadcast yet"},
{RPCResult::Type::BOOL, "fullrbf", "True if the mempool accepts RBF without replaceability signaling inspection"},
{RPCResult::Type::NUM, "numberofclusters", "Number of mempool clusters"},
{RPCResult::Type::NUM, "maxclustercount", "Number of transactions in the largest cluster"},
{RPCResult::Type::NUM, "maxclustersize", "Total virtual size of the largest cluster"}
}},
RPCExamples{
HelpExampleCli("getmempoolinfo", "")
@ -1020,8 +1169,10 @@ void RegisterMempoolRPCCommands(CRPCTable& t)
{"blockchain", &getmempoolancestors},
{"blockchain", &getmempooldescendants},
{"blockchain", &getmempoolentry},
{"blockchain", &getmempoolcluster},
{"blockchain", &gettxspendingprevout},
{"blockchain", &getmempoolinfo},
{"blockchain", &getmempoolfeeratediagram},
{"blockchain", &getrawmempool},
{"blockchain", &importmempool},
{"blockchain", &savemempool},

View File

@ -105,89 +105,4 @@ FUZZ_TARGET(mini_miner, .init = initialize_miner)
assert (sum_fees >= *total_bumpfee);
}
// Test that MiniMiner and BlockAssembler build the same block given the same transactions and constraints.
FUZZ_TARGET(mini_miner_selection, .init = initialize_miner)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
CTxMemPool pool{CTxMemPool::Options{}};
// Make a copy to preserve determinism.
std::deque<COutPoint> available_coins = g_available_coins;
std::vector<CTransactionRef> transactions;
LOCK2(::cs_main, pool.cs);
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 100)
{
CMutableTransaction mtx = CMutableTransaction();
assert(!available_coins.empty());
const size_t num_inputs = std::min(size_t{2}, available_coins.size());
const size_t num_outputs = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(2, 5);
for (size_t n{0}; n < num_inputs; ++n) {
auto prevout = available_coins.at(0);
mtx.vin.emplace_back(prevout, CScript());
available_coins.pop_front();
}
for (uint32_t n{0}; n < num_outputs; ++n) {
mtx.vout.emplace_back(100, P2WSH_OP_TRUE);
}
CTransactionRef tx = MakeTransactionRef(mtx);
// First 2 outputs are available to spend. The rest are added to outpoints to calculate bumpfees.
// There is no overlap between spendable coins and outpoints passed to MiniMiner because the
// MiniMiner interprets spent coins as to-be-replaced and excludes them.
for (uint32_t n{0}; n < num_outputs - 1; ++n) {
if (fuzzed_data_provider.ConsumeBool()) {
available_coins.emplace_front(tx->GetHash(), n);
} else {
available_coins.emplace_back(tx->GetHash(), n);
}
}
// Stop if pool reaches DEFAULT_BLOCK_MAX_WEIGHT because BlockAssembler will stop when the
// block template reaches that, but the MiniMiner will keep going.
if (pool.GetTotalTxSize() + GetVirtualTransactionSize(*tx) >= DEFAULT_BLOCK_MAX_WEIGHT) break;
TestMemPoolEntryHelper entry;
const CAmount fee{ConsumeMoney(fuzzed_data_provider, /*max=*/MAX_MONEY/100000)};
assert(MoneyRange(fee));
pool.addUnchecked(entry.Fee(fee).FromTx(tx));
transactions.push_back(tx);
}
std::vector<COutPoint> outpoints;
for (const auto& coin : g_available_coins) {
if (!pool.GetConflictTx(coin)) outpoints.push_back(coin);
}
for (const auto& tx : transactions) {
assert(pool.exists(GenTxid::Txid(tx->GetHash())));
for (uint32_t n{0}; n < tx->vout.size(); ++n) {
COutPoint coin{tx->GetHash(), n};
if (!pool.GetConflictTx(coin)) outpoints.push_back(coin);
}
}
const CFeeRate target_feerate{ConsumeMoney(fuzzed_data_provider, /*max=*/MAX_MONEY/100000)};
node::BlockAssembler::Options miner_options;
miner_options.blockMinFeeRate = target_feerate;
miner_options.nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
miner_options.test_block_validity = false;
node::BlockAssembler miner{g_setup->m_node.chainman->ActiveChainstate(), &pool, miner_options};
node::MiniMiner mini_miner{pool, outpoints};
assert(mini_miner.IsReadyToCalculate());
CScript spk_placeholder = CScript() << OP_0;
// Use BlockAssembler as oracle. BlockAssembler and MiniMiner should select the same
// transactions, stopping once packages do not meet target_feerate.
const auto blocktemplate{miner.CreateNewBlock(spk_placeholder)};
mini_miner.BuildMockTemplate(target_feerate);
assert(!mini_miner.IsReadyToCalculate());
auto mock_template_txids = mini_miner.GetMockTemplateTxids();
// MiniMiner doesn't add a coinbase tx.
assert(mock_template_txids.count(blocktemplate->block.vtx[0]->GetHash()) == 0);
mock_template_txids.emplace(blocktemplate->block.vtx[0]->GetHash());
assert(mock_template_txids.size() <= blocktemplate->block.vtx.size());
assert(mock_template_txids.size() >= blocktemplate->block.vtx.size());
assert(mock_template_txids.size() == blocktemplate->block.vtx.size());
for (const auto& tx : blocktemplate->block.vtx) {
assert(mock_template_txids.count(tx->GetHash()));
}
}
} // namespace

View File

@ -72,7 +72,7 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb)
available.insert(i);
}
if (add_to_mempool) {
if (add_to_mempool && !pool.exists(GenTxid::Txid(tx->GetHash()))) {
LOCK2(cs_main, pool.cs);
pool.addUnchecked(ConsumeTxMemPoolEntry(fuzzed_data_provider, *tx));
available.insert(i);

View File

@ -96,15 +96,25 @@ FUZZ_TARGET(package_rbf, .init = initialize_package_rbf)
std::vector<CTransaction> mempool_txs;
size_t iter{0};
int32_t replacement_vsize = fuzzed_data_provider.ConsumeIntegralInRange<int32_t>(1, 1000000);
// Keep track of the total vsize of CTxMemPoolEntry's being added to the mempool to avoid overflow
// Add replacement_vsize since this is added to new diagram during RBF check
std::optional<CMutableTransaction> replacement_tx = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider, TX_WITH_WITNESS);
if (!replacement_tx) {
return;
}
assert(iter <= g_outpoints.size());
replacement_tx->vin.resize(1);
replacement_tx->vin[0].prevout = g_outpoints[iter++];
CTransaction replacement_tx_final{*replacement_tx};
auto replacement_entry = ConsumeTxMemPoolEntry(fuzzed_data_provider, replacement_tx_final);
int32_t replacement_vsize = replacement_entry.GetTxSize();
int64_t running_vsize_total{replacement_vsize};
LOCK2(cs_main, pool.cs);
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), NUM_ITERS)
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), NUM_ITERS-1)
{
// Make sure txns only have one input, and that a unique input is given to avoid circular references
std::optional<CMutableTransaction> parent = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider, TX_WITH_WITNESS);
@ -124,9 +134,10 @@ FUZZ_TARGET(package_rbf, .init = initialize_package_rbf)
break;
}
pool.addUnchecked(parent_entry);
if (fuzzed_data_provider.ConsumeBool() && !child->vin.empty()) {
child->vin[0].prevout = COutPoint{mempool_txs.back().GetHash(), 0};
if (child->vin.size() < 1) {
child->vin.resize(1);
}
child->vin[0].prevout = COutPoint{mempool_txs.back().GetHash(), 0};
mempool_txs.emplace_back(*child);
const auto child_entry = ConsumeTxMemPoolEntry(fuzzed_data_provider, mempool_txs.back());
running_vsize_total += child_entry.GetTxSize();
@ -153,12 +164,14 @@ FUZZ_TARGET(package_rbf, .init = initialize_package_rbf)
// Calculate all conflicts:
CTxMemPool::setEntries all_conflicts;
for (auto& txiter : direct_conflicts) {
pool.CalculateDescendants(txiter, all_conflicts);
auto descendants = pool.CalculateDescendants({txiter});
all_conflicts.insert(descendants.begin(), descendants.end());
}
// Calculate the chunks for a replacement.
CAmount replacement_fees = ConsumeMoney(fuzzed_data_provider);
auto calc_results{pool.CalculateChunksForRBF(replacement_fees, replacement_vsize, direct_conflicts, all_conflicts)};
// Calculate the chunks for a replacement.
auto calc_results{pool.CalculateChunksForRBF({{&replacement_entry, replacement_fees}}, direct_conflicts, all_conflicts)};
if (calc_results.has_value()) {
// Sanity checks on the chunks.
@ -186,7 +199,7 @@ FUZZ_TARGET(package_rbf, .init = initialize_package_rbf)
}
// If internals report error, wrapper should too
auto err_tuple{ImprovesFeerateDiagram(pool, direct_conflicts, all_conflicts, replacement_fees, replacement_vsize)};
auto err_tuple{ImprovesFeerateDiagram(pool, direct_conflicts, all_conflicts, {{&replacement_entry, replacement_fees}})};
if (!calc_results.has_value()) {
assert(err_tuple.value().first == DiagramCheckError::UNCALCULABLE);
} else {

View File

@ -134,6 +134,9 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"getmempoolancestors",
"getmempooldescendants",
"getmempoolentry",
"getmempoolfeeratediagram",
"getmempoolcluster",
"gettxspendingprevout",
"getmempoolinfo",
"getmininginfo",
"getnettotals",

View File

@ -73,14 +73,10 @@ struct TransactionsDelta final : public CValidationInterface {
void SetMempoolConstraints(ArgsManager& args, FuzzedDataProvider& fuzzed_data_provider)
{
args.ForceSetArg("-limitancestorcount",
ToString(fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 50)));
args.ForceSetArg("-limitancestorsize",
ToString(fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 202)));
args.ForceSetArg("-limitdescendantcount",
ToString(fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 50)));
args.ForceSetArg("-limitdescendantsize",
ToString(fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 202)));
args.ForceSetArg("-limitclustercount",
ToString(fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 150)));
args.ForceSetArg("-limitclustersize",
ToString(fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 250)));
args.ForceSetArg("-maxmempool",
ToString(fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 200)));
args.ForceSetArg("-mempoolexpiry",
@ -97,6 +93,23 @@ void Finish(FuzzedDataProvider& fuzzed_data_provider, MockedTxPool& tx_pool, Cha
auto assembler = BlockAssembler{chainstate, &tx_pool, options};
auto block_template = assembler.CreateNewBlock(CScript{} << OP_TRUE);
Assert(block_template->block.vtx.size() >= 1);
// Try updating the mempool for this block, as though it were mined.
LOCK2(::cs_main, tx_pool.cs);
tx_pool.removeForBlock(block_template->block.vtx, chainstate.m_chain.Height() + 1);
// Now try to add those transactions back, as though a reorg happened.
std::vector<uint256> hashes_to_update;
for (const auto& tx : block_template->block.vtx) {
const auto res = AcceptToMemoryPool(chainstate, tx, GetTime(), true, /*test_accept=*/false);
if (res.m_result_type == MempoolAcceptResult::ResultType::VALID) {
hashes_to_update.push_back(tx->GetHash());
} else {
tx_pool.removeRecursive(*tx, MemPoolRemovalReason::REORG /* dummy */);
}
}
tx_pool.UpdateTransactionsFromBlock(hashes_to_update);
tx_pool.check(chainstate.CoinsTip(), chainstate.m_chain.Height() + 1);
}
const auto info_all = tx_pool.infoAll();
if (!info_all.empty()) {
@ -105,6 +118,19 @@ void Finish(FuzzedDataProvider& fuzzed_data_provider, MockedTxPool& tx_pool, Cha
assert(tx_pool.size() < info_all.size());
WITH_LOCK(::cs_main, tx_pool.check(chainstate.CoinsTip(), chainstate.m_chain.Height() + 1));
}
if (fuzzed_data_provider.ConsumeBool()) {
// Try eviction
LOCK2(::cs_main, tx_pool.cs);
tx_pool.TrimToSize(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0U, tx_pool.DynamicMemoryUsage()));
tx_pool.check(chainstate.CoinsTip(), chainstate.m_chain.Height() + 1);
}
if (fuzzed_data_provider.ConsumeBool()) {
// Try expiry
LOCK2(::cs_main, tx_pool.cs);
tx_pool.Expire(GetMockTime() - std::chrono::seconds(fuzzed_data_provider.ConsumeIntegral<uint32_t>()));
tx_pool.check(chainstate.CoinsTip(), chainstate.m_chain.Height() + 1);
}
g_setup->m_node.validation_signals->SyncWithValidationInterfaceQueue();
}

src/test/fuzz/txgraph.cpp Normal file
View File

@ -0,0 +1,249 @@
// Copyright (c) 2024 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <kernel/txgraph.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
namespace {
bool CheckClusterSizeLimits(TxGraph& txgraph, TxEntry *entry, std::vector<TxEntry::TxEntryRef>& parents, GraphLimits limits)
{
int64_t total_cluster_count{0};
int64_t total_cluster_vbytes{0};
txgraph.GetClusterSize(parents, total_cluster_vbytes, total_cluster_count);
total_cluster_count += 1;
total_cluster_vbytes += entry->GetTxSize();
return total_cluster_count <= limits.cluster_count && total_cluster_vbytes <= limits.cluster_size_vbytes;
}
FUZZ_TARGET(txgraph)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
TxGraph txgraph;
// Pick random cluster limits.
GraphLimits limits;
limits.cluster_count = fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(1, 100);
limits.cluster_size_vbytes = fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(1000, 101000);
// Generate a random number of random fee/size transactions to seed the txgraph.
std::vector<TxEntry*> all_entries;
std::set<int64_t> in_txgraph;
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 1000) {
TxEntry *entry = new TxEntry(fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(60, 100'000),
fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(0, 100'000*1'000));
all_entries.emplace_back(entry);
std::vector<TxEntry::TxEntryRef> parents;
for (size_t i=0; i<all_entries.size(); ++i) {
if (fuzzed_data_provider.ConsumeBool() && in_txgraph.count(all_entries[i]->unique_id)) {
parents.emplace_back(*all_entries[i]);
}
}
if (CheckClusterSizeLimits(txgraph, entry, parents, limits)) {
txgraph.AddTx(entry, entry->GetTxSize(), entry->GetModifiedFee(), parents);
in_txgraph.insert(entry->unique_id);
}
txgraph.Check(limits);
}
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 100) {
if (fuzzed_data_provider.ConsumeBool()) {
// Do an RBF.
std::vector<TxEntry::TxEntryRef> direct_conflicts;
for (size_t i=0; i<all_entries.size(); ++i) {
if (fuzzed_data_provider.ConsumeBool() && in_txgraph.count(all_entries[i]->unique_id)) {
direct_conflicts.emplace_back(*all_entries[i]);
}
}
std::vector<TxEntry::TxEntryRef> all_conflicts = txgraph.GetDescendants(direct_conflicts);
TxGraphChangeSet changeset(&txgraph, limits, all_conflicts);
TxEntry *entry = new TxEntry(fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(60, 100'000),
fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(0, 1'000*100'000));
all_entries.emplace_back(entry);
std::vector<TxEntry::TxEntryRef> parents;
for (size_t i=0; i<all_entries.size(); ++i) {
// Skip anything that is a conflict
bool conflict{false};
for (auto &c: all_conflicts) {
if (c.get().unique_id == all_entries[i]->unique_id) {
conflict = true;
break;
}
}
if (conflict) continue;
if (fuzzed_data_provider.ConsumeBool() && in_txgraph.count(all_entries[i]->unique_id)) {
parents.emplace_back(*all_entries[i]);
}
}
if (changeset.AddTx(*entry, parents)) {
std::vector<FeeFrac> diagram_dummy;
changeset.GetFeerateDiagramOld(diagram_dummy);
changeset.GetFeerateDiagramNew(diagram_dummy);
if (fuzzed_data_provider.ConsumeBool()) {
changeset.Apply();
in_txgraph.insert(entry->unique_id);
for (auto &c: all_conflicts) {
in_txgraph.erase(c.get().unique_id);
}
}
}
}
txgraph.Check(limits);
if (fuzzed_data_provider.ConsumeBool()) {
// Remove a random transaction and its descendants
for (size_t i=0; i < all_entries.size(); ++i) {
if (in_txgraph.count(all_entries[i]->unique_id) && fuzzed_data_provider.ConsumeBool()) {
std::vector<TxEntry::TxEntryRef> to_remove = txgraph.GetDescendants({*all_entries[i]});
txgraph.RemoveBatch(to_remove);
for (size_t k=0; k<to_remove.size(); ++k) {
in_txgraph.erase(to_remove[k].get().unique_id);
}
}
}
}
if (fuzzed_data_provider.ConsumeBool()) {
// simulate connecting a block
uint64_t num_transactions_to_select = fuzzed_data_provider.ConsumeIntegralInRange<uint64_t>(1, in_txgraph.size()/20+1);
std::set<TxEntry::TxEntryRef, TxEntry::CompareById> selected_transactions;
for (size_t i=0; i<all_entries.size(); ++i) {
if (fuzzed_data_provider.ConsumeBool() && in_txgraph.count(all_entries[i]->unique_id)) {
std::vector<TxEntry::TxEntryRef> to_mine = txgraph.GetAncestors({*all_entries[i]});
selected_transactions.insert(to_mine.begin(), to_mine.end());
if (selected_transactions.size() >= num_transactions_to_select) {
break;
}
}
}
if (selected_transactions.size() > 0) {
std::vector<TxEntry::TxEntryRef> selected_transactions_vec(selected_transactions.begin(), selected_transactions.end());
txgraph.RemoveBatch(selected_transactions_vec);
for (size_t j=0; j<selected_transactions_vec.size(); ++j) {
in_txgraph.erase(selected_transactions_vec[j].get().unique_id);
}
}
txgraph.Check(limits);
}
if (fuzzed_data_provider.ConsumeBool()) {
// Test the TxSelector (mining code).
TxSelector txselector(&txgraph);
int64_t num_invocations = fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(1, in_txgraph.size()/20+1);
std::vector<TxEntry::TxEntryRef> selected_transactions;
while (num_invocations > 0) {
txselector.SelectNextChunk(selected_transactions);
// TODO: add a check that the feerates never go up as we make
// further calls.
if (fuzzed_data_provider.ConsumeBool()) {
txselector.Success();
}
--num_invocations;
}
txgraph.RemoveBatch(selected_transactions);
for (size_t k=0; k<selected_transactions.size(); ++k) {
in_txgraph.erase(selected_transactions[k].get().unique_id);
}
// Check that the selected transactions are topologically valid.
// Do this by dumping into a cluster, and running Cluster::Check
TxGraphCluster dummy(-1, &txgraph);
for (auto& tx : selected_transactions) {
tx.get().m_cluster = &dummy;
dummy.AddTransaction(tx, false);
}
dummy.Rechunk();
assert(dummy.Check());
txgraph.Check(limits);
}
if (fuzzed_data_provider.ConsumeBool()) {
// Run eviction
Trimmer trimmer(&txgraph);
std::vector<TxEntry::TxEntryRef> removed;
auto feerate = trimmer.RemoveWorstChunk(removed);
// Check that the feerate matches what was removed
CAmount total_fee{0};
int32_t total_size{0};
for (auto &entry : removed) {
total_fee += entry.get().GetModifiedFee();
total_size += entry.get().GetTxSize();
in_txgraph.erase(entry.get().unique_id);
}
assert(feerate == CFeeRate(total_fee, total_size));
}
txgraph.Check(limits);
if (fuzzed_data_provider.ConsumeBool()) {
// do a reorg
// Generate some random transactions and pick some existing random
// transactions to have as children.
int64_t num_to_add = fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(1, 100);
std::map<int64_t, std::vector<TxEntry::TxEntryRef>> children_map;
std::vector<TxEntry::TxEntryRef> new_transactions;
std::set<int64_t> new_transaction_ids;
for (int k=0; k<num_to_add; ++k) {
all_entries.emplace_back(new TxEntry(fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(60, 100'000),
fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(0, 1'000*100'000)));
new_transactions.emplace_back(*all_entries.back());
children_map.insert(std::make_pair(all_entries.back()->unique_id, std::vector<TxEntry::TxEntryRef>()));
for (auto &id : in_txgraph) {
if (new_transaction_ids.count(id)) continue;
if (fuzzed_data_provider.ConsumeBool()) {
// Take advantage of the fact that the unique id's for
// each transaction correspond to the index in
// all_entries
for (auto entry : all_entries) {
if (entry->unique_id == id) {
children_map[all_entries.back()->unique_id].emplace_back(*entry);
break;
}
}
}
}
// Pick some random parents, amongst the set of new
// transactions.
std::vector<TxEntry::TxEntryRef> parents;
for (size_t m=0; m<new_transactions.size()-1; ++m) {
if (fuzzed_data_provider.ConsumeBool()) {
parents.emplace_back(new_transactions[m]);
}
}
if (CheckClusterSizeLimits(txgraph, all_entries.back(), parents, limits)) {
txgraph.AddTx(all_entries.back(), all_entries.back()->GetTxSize(), all_entries.back()->GetModifiedFee(), parents);
txgraph.Check(limits);
in_txgraph.insert(all_entries.back()->unique_id);
new_transaction_ids.insert(all_entries.back()->unique_id);
} else {
new_transactions.pop_back();
}
}
std::vector<TxEntry::TxEntryRef> removed;
txgraph.AddParentTxs(new_transactions, limits, [&](const TxEntry& tx) { return children_map[tx.unique_id]; }, removed);
for (auto r : removed) {
in_txgraph.erase(r.get().unique_id);
}
txgraph.Check(limits);
}
}
txgraph.Check(limits);
// free the memory that was used
for (auto txentry : all_entries) {
delete txentry;
}
all_entries.clear();
}
} // namespace

View File

@ -115,318 +115,6 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest)
BOOST_CHECK_EQUAL(testPool.size(), 0U);
}
template <typename name>
static void CheckSort(CTxMemPool& pool, std::vector<std::string>& sortedOrder) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
{
BOOST_CHECK_EQUAL(pool.size(), sortedOrder.size());
typename CTxMemPool::indexed_transaction_set::index<name>::type::iterator it = pool.mapTx.get<name>().begin();
int count = 0;
for (; it != pool.mapTx.get<name>().end(); ++it, ++count) {
BOOST_CHECK_EQUAL(it->GetTx().GetHash().ToString(), sortedOrder[count]);
}
}
BOOST_AUTO_TEST_CASE(MempoolIndexingTest)
{
CTxMemPool& pool = *Assert(m_node.mempool);
LOCK2(cs_main, pool.cs);
TestMemPoolEntryHelper entry;
/* 3rd highest fee */
CMutableTransaction tx1 = CMutableTransaction();
tx1.vout.resize(1);
tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx1.vout[0].nValue = 10 * COIN;
pool.addUnchecked(entry.Fee(10000LL).FromTx(tx1));
/* highest fee */
CMutableTransaction tx2 = CMutableTransaction();
tx2.vout.resize(1);
tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx2.vout[0].nValue = 2 * COIN;
pool.addUnchecked(entry.Fee(20000LL).FromTx(tx2));
/* lowest fee */
CMutableTransaction tx3 = CMutableTransaction();
tx3.vout.resize(1);
tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx3.vout[0].nValue = 5 * COIN;
pool.addUnchecked(entry.Fee(0LL).FromTx(tx3));
/* 2nd highest fee */
CMutableTransaction tx4 = CMutableTransaction();
tx4.vout.resize(1);
tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx4.vout[0].nValue = 6 * COIN;
pool.addUnchecked(entry.Fee(15000LL).FromTx(tx4));
/* equal fee rate to tx1, but newer */
CMutableTransaction tx5 = CMutableTransaction();
tx5.vout.resize(1);
tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx5.vout[0].nValue = 11 * COIN;
entry.time = NodeSeconds{1s};
pool.addUnchecked(entry.Fee(10000LL).FromTx(tx5));
BOOST_CHECK_EQUAL(pool.size(), 5U);
std::vector<std::string> sortedOrder;
sortedOrder.resize(5);
sortedOrder[0] = tx3.GetHash().ToString(); // 0
sortedOrder[1] = tx5.GetHash().ToString(); // 10000
sortedOrder[2] = tx1.GetHash().ToString(); // 10000
sortedOrder[3] = tx4.GetHash().ToString(); // 15000
sortedOrder[4] = tx2.GetHash().ToString(); // 20000
CheckSort<descendant_score>(pool, sortedOrder);
/* low fee but with high fee child */
/* tx6 -> tx7 -> tx8, tx9 -> tx10 */
CMutableTransaction tx6 = CMutableTransaction();
tx6.vout.resize(1);
tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx6.vout[0].nValue = 20 * COIN;
pool.addUnchecked(entry.Fee(0LL).FromTx(tx6));
BOOST_CHECK_EQUAL(pool.size(), 6U);
// Check that at this point, tx6 is sorted low
sortedOrder.insert(sortedOrder.begin(), tx6.GetHash().ToString());
CheckSort<descendant_score>(pool, sortedOrder);
CTxMemPool::setEntries setAncestors;
setAncestors.insert(pool.GetIter(tx6.GetHash()).value());
CMutableTransaction tx7 = CMutableTransaction();
tx7.vin.resize(1);
tx7.vin[0].prevout = COutPoint(tx6.GetHash(), 0);
tx7.vin[0].scriptSig = CScript() << OP_11;
tx7.vout.resize(2);
tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx7.vout[0].nValue = 10 * COIN;
tx7.vout[1].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx7.vout[1].nValue = 1 * COIN;
auto ancestors_calculated{pool.CalculateMemPoolAncestors(entry.Fee(2000000LL).FromTx(tx7), CTxMemPool::Limits::NoLimits())};
BOOST_REQUIRE(ancestors_calculated.has_value());
BOOST_CHECK(*ancestors_calculated == setAncestors);
pool.addUnchecked(entry.FromTx(tx7), setAncestors);
BOOST_CHECK_EQUAL(pool.size(), 7U);
// Now tx6 should be sorted higher (high fee child): tx7, tx6, tx2, ...
sortedOrder.erase(sortedOrder.begin());
sortedOrder.push_back(tx6.GetHash().ToString());
sortedOrder.push_back(tx7.GetHash().ToString());
CheckSort<descendant_score>(pool, sortedOrder);
/* low fee child of tx7 */
CMutableTransaction tx8 = CMutableTransaction();
tx8.vin.resize(1);
tx8.vin[0].prevout = COutPoint(tx7.GetHash(), 0);
tx8.vin[0].scriptSig = CScript() << OP_11;
tx8.vout.resize(1);
tx8.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx8.vout[0].nValue = 10 * COIN;
setAncestors.insert(pool.GetIter(tx7.GetHash()).value());
pool.addUnchecked(entry.Fee(0LL).Time(NodeSeconds{2s}).FromTx(tx8), setAncestors);
// Now tx8 should be sorted low, but tx6/tx7 both high
sortedOrder.insert(sortedOrder.begin(), tx8.GetHash().ToString());
CheckSort<descendant_score>(pool, sortedOrder);
/* low fee child of tx7 */
CMutableTransaction tx9 = CMutableTransaction();
tx9.vin.resize(1);
tx9.vin[0].prevout = COutPoint(tx7.GetHash(), 1);
tx9.vin[0].scriptSig = CScript() << OP_11;
tx9.vout.resize(1);
tx9.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx9.vout[0].nValue = 1 * COIN;
pool.addUnchecked(entry.Fee(0LL).Time(NodeSeconds{3s}).FromTx(tx9), setAncestors);
// tx9 should be sorted low
BOOST_CHECK_EQUAL(pool.size(), 9U);
sortedOrder.insert(sortedOrder.begin(), tx9.GetHash().ToString());
CheckSort<descendant_score>(pool, sortedOrder);
std::vector<std::string> snapshotOrder = sortedOrder;
setAncestors.insert(pool.GetIter(tx8.GetHash()).value());
setAncestors.insert(pool.GetIter(tx9.GetHash()).value());
/* tx10 depends on tx8 and tx9 and has a high fee*/
CMutableTransaction tx10 = CMutableTransaction();
tx10.vin.resize(2);
tx10.vin[0].prevout = COutPoint(tx8.GetHash(), 0);
tx10.vin[0].scriptSig = CScript() << OP_11;
tx10.vin[1].prevout = COutPoint(tx9.GetHash(), 0);
tx10.vin[1].scriptSig = CScript() << OP_11;
tx10.vout.resize(1);
tx10.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx10.vout[0].nValue = 10 * COIN;
ancestors_calculated = pool.CalculateMemPoolAncestors(entry.Fee(200000LL).Time(NodeSeconds{4s}).FromTx(tx10), CTxMemPool::Limits::NoLimits());
BOOST_REQUIRE(ancestors_calculated);
BOOST_CHECK(*ancestors_calculated == setAncestors);
pool.addUnchecked(entry.FromTx(tx10), setAncestors);
/**
* tx8 and tx9 should both now be sorted higher
* Final order after tx10 is added:
*
* tx3 = 0 (1)
* tx5 = 10000 (1)
* tx1 = 10000 (1)
* tx4 = 15000 (1)
* tx2 = 20000 (1)
* tx9 = 200k (2 txs)
* tx8 = 200k (2 txs)
* tx10 = 200k (1 tx)
* tx6 = 2.2M (5 txs)
* tx7 = 2.2M (4 txs)
*/
sortedOrder.erase(sortedOrder.begin(), sortedOrder.begin()+2); // take out tx9, tx8 from the beginning
sortedOrder.insert(sortedOrder.begin()+5, tx9.GetHash().ToString());
sortedOrder.insert(sortedOrder.begin()+6, tx8.GetHash().ToString());
sortedOrder.insert(sortedOrder.begin()+7, tx10.GetHash().ToString()); // tx10 is just before tx6
CheckSort<descendant_score>(pool, sortedOrder);
// there should be 10 transactions in the mempool
BOOST_CHECK_EQUAL(pool.size(), 10U);
// Now try removing tx10 and verify the sort order returns to normal
pool.removeRecursive(*Assert(pool.get(tx10.GetHash())), REMOVAL_REASON_DUMMY);
CheckSort<descendant_score>(pool, snapshotOrder);
pool.removeRecursive(*Assert(pool.get(tx9.GetHash())), REMOVAL_REASON_DUMMY);
pool.removeRecursive(*Assert(pool.get(tx8.GetHash())), REMOVAL_REASON_DUMMY);
}
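The orderings checked in MempoolIndexingTest come from the descendant-score index: each entry is ranked by the better of its own feerate and its feerate including in-mempool descendants, which is why the zero-fee tx6 jumps up once the high-fee tx7 is attached. A minimal standalone sketch of that comparison; EntryFees and SortsBelow are illustrative names, not the real CTxMemPoolEntry accessors.
#include <cassert>
#include <cstdint>
#include <utility>
struct EntryFees {
    int64_t fee;            // modified fee of the tx itself
    int64_t size;           // vsize of the tx itself
    int64_t fee_with_desc;  // fee including in-mempool descendants
    int64_t size_with_desc; // vsize including in-mempool descendants
};
// Score each entry by the better of (own feerate, feerate with descendants),
// then compare the two scores; a/b < c/d is evaluated as a*d < c*b.
bool SortsBelow(const EntryFees& a, const EntryFees& b)
{
    auto better = [](int64_t f1, int64_t s1, int64_t f2, int64_t s2) {
        return f1 * s2 > f2 * s1 ? std::pair{f1, s1} : std::pair{f2, s2};
    };
    auto [fa, sa] = better(a.fee, a.size, a.fee_with_desc, a.size_with_desc);
    auto [fb, sb] = better(b.fee, b.size, b.fee_with_desc, b.size_with_desc);
    return fa * sb < fb * sa;
}
int main()
{
    EntryFees zero_fee_parent{0, 100, 2'000'000, 200}; // lifted by a high-fee child
    EntryFees lone_high_fee{20'000, 100, 20'000, 100};
    assert(!SortsBelow(zero_fee_parent, lone_high_fee)); // the CPFP'd parent ranks above
}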
BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest)
{
CTxMemPool& pool = *Assert(m_node.mempool);
LOCK2(cs_main, pool.cs);
TestMemPoolEntryHelper entry;
/* 3rd highest fee */
CMutableTransaction tx1 = CMutableTransaction();
tx1.vout.resize(1);
tx1.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx1.vout[0].nValue = 10 * COIN;
pool.addUnchecked(entry.Fee(10000LL).FromTx(tx1));
/* highest fee */
CMutableTransaction tx2 = CMutableTransaction();
tx2.vout.resize(1);
tx2.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx2.vout[0].nValue = 2 * COIN;
pool.addUnchecked(entry.Fee(20000LL).FromTx(tx2));
uint64_t tx2Size = GetVirtualTransactionSize(CTransaction(tx2));
/* lowest fee */
CMutableTransaction tx3 = CMutableTransaction();
tx3.vout.resize(1);
tx3.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx3.vout[0].nValue = 5 * COIN;
pool.addUnchecked(entry.Fee(0LL).FromTx(tx3));
/* 2nd highest fee */
CMutableTransaction tx4 = CMutableTransaction();
tx4.vout.resize(1);
tx4.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx4.vout[0].nValue = 6 * COIN;
pool.addUnchecked(entry.Fee(15000LL).FromTx(tx4));
/* equal fee rate to tx1, but newer */
CMutableTransaction tx5 = CMutableTransaction();
tx5.vout.resize(1);
tx5.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx5.vout[0].nValue = 11 * COIN;
pool.addUnchecked(entry.Fee(10000LL).FromTx(tx5));
BOOST_CHECK_EQUAL(pool.size(), 5U);
std::vector<std::string> sortedOrder;
sortedOrder.resize(5);
sortedOrder[0] = tx2.GetHash().ToString(); // 20000
sortedOrder[1] = tx4.GetHash().ToString(); // 15000
// tx1 and tx5 are both 10000
// Ties are broken by hash, not timestamp, so determine which
// hash comes first.
if (tx1.GetHash() < tx5.GetHash()) {
sortedOrder[2] = tx1.GetHash().ToString();
sortedOrder[3] = tx5.GetHash().ToString();
} else {
sortedOrder[2] = tx5.GetHash().ToString();
sortedOrder[3] = tx1.GetHash().ToString();
}
sortedOrder[4] = tx3.GetHash().ToString(); // 0
CheckSort<ancestor_score>(pool, sortedOrder);
/* low fee parent with high fee child */
/* tx6 (0) -> tx7 (high) */
CMutableTransaction tx6 = CMutableTransaction();
tx6.vout.resize(1);
tx6.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx6.vout[0].nValue = 20 * COIN;
uint64_t tx6Size = GetVirtualTransactionSize(CTransaction(tx6));
pool.addUnchecked(entry.Fee(0LL).FromTx(tx6));
BOOST_CHECK_EQUAL(pool.size(), 6U);
// Ties are broken by hash
if (tx3.GetHash() < tx6.GetHash())
sortedOrder.push_back(tx6.GetHash().ToString());
else
sortedOrder.insert(sortedOrder.end()-1,tx6.GetHash().ToString());
CheckSort<ancestor_score>(pool, sortedOrder);
CMutableTransaction tx7 = CMutableTransaction();
tx7.vin.resize(1);
tx7.vin[0].prevout = COutPoint(tx6.GetHash(), 0);
tx7.vin[0].scriptSig = CScript() << OP_11;
tx7.vout.resize(1);
tx7.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx7.vout[0].nValue = 10 * COIN;
uint64_t tx7Size = GetVirtualTransactionSize(CTransaction(tx7));
/* set the fee to just below tx2's feerate when including ancestor */
CAmount fee = (20000/tx2Size)*(tx7Size + tx6Size) - 1;
pool.addUnchecked(entry.Fee(fee).FromTx(tx7));
BOOST_CHECK_EQUAL(pool.size(), 7U);
sortedOrder.insert(sortedOrder.begin()+1, tx7.GetHash().ToString());
CheckSort<ancestor_score>(pool, sortedOrder);
/* after tx6 is mined, tx7 should move up in the sort */
std::vector<CTransactionRef> vtx;
vtx.push_back(MakeTransactionRef(tx6));
pool.removeForBlock(vtx, 1);
sortedOrder.erase(sortedOrder.begin()+1);
// Ties are broken by hash
if (tx3.GetHash() < tx6.GetHash())
sortedOrder.pop_back();
else
sortedOrder.erase(sortedOrder.end()-2);
sortedOrder.insert(sortedOrder.begin(), tx7.GetHash().ToString());
CheckSort<ancestor_score>(pool, sortedOrder);
// High-fee parent, low-fee child
// tx7 -> tx8
CMutableTransaction tx8 = CMutableTransaction();
tx8.vin.resize(1);
tx8.vin[0].prevout = COutPoint(tx7.GetHash(), 0);
tx8.vin[0].scriptSig = CScript() << OP_11;
tx8.vout.resize(1);
tx8.vout[0].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx8.vout[0].nValue = 10*COIN;
// Check that we sort by min(feerate, ancestor_feerate):
// set the fee so that the ancestor feerate is above tx1/5,
// but the transaction's own feerate is lower
pool.addUnchecked(entry.Fee(5000LL).FromTx(tx8));
sortedOrder.insert(sortedOrder.end()-1, tx8.GetHash().ToString());
CheckSort<ancestor_score>(pool, sortedOrder);
}
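As the comment above tx8 notes, the ancestor-score index ranks an entry by min(feerate, ancestor_feerate), so a low-fee child of a high-fee parent is still scored by its own feerate. A rough sketch of that rule under simplified assumptions; AncestorFees and AncestorScore are illustrative, not mempool API.
#include <cassert>
#include <cstdint>
#include <utility>
struct AncestorFees {
    int64_t fee;           // modified fee of the tx itself
    int64_t size;          // vsize of the tx itself
    int64_t fee_with_anc;  // fee including in-mempool ancestors
    int64_t size_with_anc; // vsize including in-mempool ancestors
};
// Score by min(own feerate, ancestor feerate); a/b < c/d is compared as a*d < c*b.
std::pair<int64_t, int64_t> AncestorScore(const AncestorFees& e)
{
    const bool own_is_lower = e.fee * e.size_with_anc < e.fee_with_anc * e.size;
    return own_is_lower ? std::pair{e.fee, e.size}
                        : std::pair{e.fee_with_anc, e.size_with_anc};
}
int main()
{
    // High-fee parent, low-fee child: the child's own (lower) feerate is binding.
    AncestorFees child{5'000, 100, 5'000 + 100'000, 100 + 150};
    auto [fee, size] = AncestorScore(child);
    assert(fee == 5'000 && size == 100);
}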
BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
{
auto& pool = static_cast<MemPoolTest&>(*Assert(m_node.mempool));
@ -536,20 +224,23 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
// we only require this to remove, at most, 2 txns, because it's not clear what we're really optimizing for aside from that
pool.TrimToSize(pool.DynamicMemoryUsage() - 1);
BOOST_CHECK(pool.exists(GenTxid::Txid(tx4.GetHash())));
BOOST_CHECK(pool.exists(GenTxid::Txid(tx6.GetHash())));
// Tx6 should get "chunked" with tx7, so it should be evicted as well.
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx6.GetHash())));
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx7.GetHash())));
if (!pool.exists(GenTxid::Txid(tx5.GetHash())))
pool.addUnchecked(entry.Fee(1000LL).FromTx(tx5));
pool.addUnchecked(entry.Fee(1100LL).FromTx(tx6));
pool.addUnchecked(entry.Fee(9000LL).FromTx(tx7));
pool.TrimToSize(pool.DynamicMemoryUsage() / 2); // should maximize mempool size by only removing 5/7
BOOST_CHECK(pool.exists(GenTxid::Txid(tx4.GetHash())));
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx5.GetHash())));
BOOST_CHECK(pool.exists(GenTxid::Txid(tx6.GetHash())));
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx6.GetHash())));
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx7.GetHash())));
pool.addUnchecked(entry.Fee(1000LL).FromTx(tx5));
pool.addUnchecked(entry.Fee(1100LL).FromTx(tx6));
pool.addUnchecked(entry.Fee(9000LL).FromTx(tx7));
std::vector<CTransactionRef> vtx;
@ -598,7 +289,7 @@ inline CTransactionRef make_tx(std::vector<CAmount>&& output_values, std::vector
BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
{
size_t ancestors, descendants;
size_t ancestors, clustersize;
CTxMemPool& pool = *Assert(m_node.mempool);
LOCK2(cs_main, pool.cs);
@ -611,10 +302,10 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
CTransactionRef tx1 = make_tx(/*output_values=*/{10 * COIN});
pool.addUnchecked(entry.Fee(10000LL).FromTx(tx1));
// Ancestors / descendants should be 1 / 1 (itself / itself)
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
// Ancestors / clustersize should be 1 / 1 (itself / itself)
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 1ULL);
BOOST_CHECK_EQUAL(descendants, 1ULL);
BOOST_CHECK_EQUAL(clustersize, 1ULL);
/* Child transaction */
//
@ -623,17 +314,17 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
CTransactionRef tx2 = make_tx(/*output_values=*/{495 * CENT, 5 * COIN}, /*inputs=*/{tx1});
pool.addUnchecked(entry.Fee(10000LL).FromTx(tx2));
// Ancestors / descendants should be:
// transaction ancestors descendants
// Ancestors / clustersize should be:
// transaction ancestors clustersize
// ============ =========== ===========
// tx1 1 (tx1) 2 (tx1,2)
// tx2 2 (tx1,2) 2 (tx1,2)
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 1ULL);
BOOST_CHECK_EQUAL(descendants, 2ULL);
pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 2ULL);
pool.GetTransactionAncestry(tx2->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 2ULL);
BOOST_CHECK_EQUAL(descendants, 2ULL);
BOOST_CHECK_EQUAL(clustersize, 2ULL);
/* Grand-child 1 */
//
@ -642,21 +333,21 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
CTransactionRef tx3 = make_tx(/*output_values=*/{290 * CENT, 200 * CENT}, /*inputs=*/{tx2});
pool.addUnchecked(entry.Fee(10000LL).FromTx(tx3));
// Ancestors / descendants should be:
// transaction ancestors descendants
// Ancestors / clustersize should be:
// transaction ancestors clustersize
// ============ =========== ===========
// tx1 1 (tx1) 3 (tx1,2,3)
// tx2 2 (tx1,2) 3 (tx1,2,3)
// tx3 3 (tx1,2,3) 3 (tx1,2,3)
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 1ULL);
BOOST_CHECK_EQUAL(descendants, 3ULL);
pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 3ULL);
pool.GetTransactionAncestry(tx2->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 2ULL);
BOOST_CHECK_EQUAL(descendants, 3ULL);
pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 3ULL);
pool.GetTransactionAncestry(tx3->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 3ULL);
BOOST_CHECK_EQUAL(descendants, 3ULL);
BOOST_CHECK_EQUAL(clustersize, 3ULL);
/* Grand-child 2 */
//
@ -667,25 +358,25 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
CTransactionRef tx4 = make_tx(/*output_values=*/{290 * CENT, 250 * CENT}, /*inputs=*/{tx2}, /*input_indices=*/{1});
pool.addUnchecked(entry.Fee(10000LL).FromTx(tx4));
// Ancestors / descendants should be:
// transaction ancestors descendants
// Ancestors / clustersize should be:
// transaction ancestors clustersize
// ============ =========== ===========
// tx1 1 (tx1) 4 (tx1,2,3,4)
// tx2 2 (tx1,2) 4 (tx1,2,3,4)
// tx3 3 (tx1,2,3) 4 (tx1,2,3,4)
// tx4 3 (tx1,2,4) 4 (tx1,2,3,4)
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 1ULL);
BOOST_CHECK_EQUAL(descendants, 4ULL);
pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 4ULL);
pool.GetTransactionAncestry(tx2->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 2ULL);
BOOST_CHECK_EQUAL(descendants, 4ULL);
pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 4ULL);
pool.GetTransactionAncestry(tx3->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 3ULL);
BOOST_CHECK_EQUAL(descendants, 4ULL);
pool.GetTransactionAncestry(tx4->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 4ULL);
pool.GetTransactionAncestry(tx4->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 3ULL);
BOOST_CHECK_EQUAL(descendants, 4ULL);
BOOST_CHECK_EQUAL(clustersize, 4ULL);
/* Make an alternate branch that is longer and connect it to tx3 */
//
@ -703,56 +394,56 @@ BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
tyi = make_tx(/*output_values=*/{v}, /*inputs=*/i > 0 ? std::vector<CTransactionRef>{*ty[i - 1]} : std::vector<CTransactionRef>{});
v -= 50 * CENT;
pool.addUnchecked(entry.Fee(10000LL).FromTx(tyi));
pool.GetTransactionAncestry(tyi->GetHash(), ancestors, descendants);
pool.GetTransactionAncestry(tyi->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, i+1);
BOOST_CHECK_EQUAL(descendants, i+1);
BOOST_CHECK_EQUAL(clustersize, i+1);
}
CTransactionRef ty6 = make_tx(/*output_values=*/{5 * COIN}, /*inputs=*/{tx3, ty5});
pool.addUnchecked(entry.Fee(10000LL).FromTx(ty6));
// Ancestors / descendants should be:
// transaction ancestors descendants
// Ancestors / clustersize should be:
// transaction ancestors clustersize
// ============ =================== ===========
// tx1 1 (tx1) 5 (tx1,2,3,4, ty6)
// tx2 2 (tx1,2) 5 (tx1,2,3,4, ty6)
// tx3 3 (tx1,2,3) 5 (tx1,2,3,4, ty6)
// tx4 3 (tx1,2,4) 5 (tx1,2,3,4, ty6)
// ty1 1 (ty1) 6 (ty1,2,3,4,5,6)
// ty2 2 (ty1,2) 6 (ty1,2,3,4,5,6)
// ty3 3 (ty1,2,3) 6 (ty1,2,3,4,5,6)
// ty4 4 (y1234) 6 (ty1,2,3,4,5,6)
// ty5 5 (y12345) 6 (ty1,2,3,4,5,6)
// ty6 9 (tx123, ty123456) 6 (ty1,2,3,4,5,6)
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
// tx1 1 (tx1) 10 (tx1-4, ty1-6)
// tx2 2 (tx1,2) 10
// tx3 3 (tx1,2,3) 10
// tx4 3 (tx1,2,4) 10
// ty1 1 (ty1) 10
// ty2 2 (ty1,2) 10
// ty3 3 (ty1,2,3) 10
// ty4 4 (y1234) 10
// ty5 5 (y12345) 10
// ty6 9 (tx123, ty123456) 10
pool.GetTransactionAncestry(tx1->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 1ULL);
BOOST_CHECK_EQUAL(descendants, 5ULL);
pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(tx2->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 2ULL);
BOOST_CHECK_EQUAL(descendants, 5ULL);
pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(tx3->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 3ULL);
BOOST_CHECK_EQUAL(descendants, 5ULL);
pool.GetTransactionAncestry(tx4->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(tx4->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 3ULL);
BOOST_CHECK_EQUAL(descendants, 5ULL);
pool.GetTransactionAncestry(ty1->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(ty1->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 1ULL);
BOOST_CHECK_EQUAL(descendants, 6ULL);
pool.GetTransactionAncestry(ty2->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(ty2->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 2ULL);
BOOST_CHECK_EQUAL(descendants, 6ULL);
pool.GetTransactionAncestry(ty3->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(ty3->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 3ULL);
BOOST_CHECK_EQUAL(descendants, 6ULL);
pool.GetTransactionAncestry(ty4->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(ty4->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 4ULL);
BOOST_CHECK_EQUAL(descendants, 6ULL);
pool.GetTransactionAncestry(ty5->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(ty5->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 5ULL);
BOOST_CHECK_EQUAL(descendants, 6ULL);
pool.GetTransactionAncestry(ty6->GetHash(), ancestors, descendants);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
pool.GetTransactionAncestry(ty6->GetHash(), ancestors, clustersize);
BOOST_CHECK_EQUAL(ancestors, 9ULL);
BOOST_CHECK_EQUAL(descendants, 6ULL);
BOOST_CHECK_EQUAL(clustersize, 10ULL);
}
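The clustersize numbers asserted above are just the size of the connected component a transaction belongs to, following both parent and child links; that is why every member of the joined tx/ty graph reports 10 once ty6 bridges the two chains. A small illustration of that traversal over a toy adjacency map, not the mempool's actual data structures.
#include <cassert>
#include <cstddef>
#include <map>
#include <set>
#include <vector>
// Toy graph: tx id -> neighbouring ids (parents and children, undirected here).
using Graph = std::map<int, std::vector<int>>;
std::size_t ClusterSize(const Graph& g, int start)
{
    std::set<int> seen{start};
    std::vector<int> todo{start};
    while (!todo.empty()) {
        const int cur = todo.back();
        todo.pop_back();
        const auto it = g.find(cur);
        if (it == g.end()) continue;
        for (const int next : it->second) {
            if (seen.insert(next).second) todo.push_back(next);
        }
    }
    return seen.size();
}
int main()
{
    // tx1 <- tx2 <- {tx3, tx4}; ty1 <- ... <- ty5; ty6 spends tx3 and ty5.
    Graph g{
        {1, {2}}, {2, {1, 3, 4}}, {3, {2, 6}}, {4, {2}},
        {11, {12}}, {12, {11, 13}}, {13, {12, 14}}, {14, {13, 15}}, {15, {14, 6}},
        {6, {3, 15}},
    };
    assert(ClusterSize(g, 1) == 10);  // every member of the merged cluster reports 10
    assert(ClusterSize(g, 11) == 10);
}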
BOOST_AUTO_TEST_CASE(MempoolAncestryTestsDiamond)

View File

@ -448,14 +448,25 @@ BOOST_FIXTURE_TEST_CASE(miniminer_overlap, TestChain100Setup)
BOOST_CHECK(tx2_feerate > tx3_feerate);
const auto tx3_anc_feerate = CFeeRate(low_fee + med_fee + high_fee + high_fee, tx_vsizes[0] + tx_vsizes[1] + tx_vsizes[2] + tx_vsizes[3]);
const auto& tx3_entry{*Assert(pool.GetEntry(tx3->GetHash()))};
BOOST_CHECK(tx3_anc_feerate == CFeeRate(tx3_entry.GetModFeesWithAncestors(), tx3_entry.GetSizeWithAncestors()));
// Check that ancestor feerate is calculated correctly.
size_t dummy_count{0};
CAmount mod_fees{0};
size_t ancestor_vsize{0};
pool.CalculateAncestorData(tx3_entry, dummy_count, ancestor_vsize, mod_fees);
BOOST_CHECK(tx3_anc_feerate == CFeeRate(mod_fees, ancestor_vsize));
const auto tx4_feerate = CFeeRate(high_fee, tx_vsizes[4]);
const auto tx6_anc_feerate = CFeeRate(high_fee + low_fee + med_fee, tx_vsizes[4] + tx_vsizes[5] + tx_vsizes[6]);
const auto& tx6_entry{*Assert(pool.GetEntry(tx6->GetHash()))};
BOOST_CHECK(tx6_anc_feerate == CFeeRate(tx6_entry.GetModFeesWithAncestors(), tx6_entry.GetSizeWithAncestors()));
pool.CalculateAncestorData(tx6_entry, dummy_count, ancestor_vsize, mod_fees);
BOOST_CHECK(tx6_anc_feerate == CFeeRate(mod_fees, ancestor_vsize));
const auto tx7_anc_feerate = CFeeRate(high_fee + low_fee + high_fee, tx_vsizes[4] + tx_vsizes[5] + tx_vsizes[7]);
const auto& tx7_entry{*Assert(pool.GetEntry(tx7->GetHash()))};
BOOST_CHECK(tx7_anc_feerate == CFeeRate(tx7_entry.GetModFeesWithAncestors(), tx7_entry.GetSizeWithAncestors()));
pool.CalculateAncestorData(tx7_entry, dummy_count, ancestor_vsize, mod_fees);
BOOST_CHECK(tx7_anc_feerate == CFeeRate(mod_fees, ancestor_vsize));
BOOST_CHECK(tx4_feerate > tx6_anc_feerate);
BOOST_CHECK(tx4_feerate > tx7_anc_feerate);
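The assertions above now derive ancestor feerates through CalculateAncestorData rather than the removed per-entry caches; conceptually that call walks the entry's in-mempool ancestors (inclusive of itself) and totals count, vsize and modified fees. A hedged stand-in using a hypothetical SimpleEntry type, shown only to make the out-parameters concrete.
#include <cstddef>
#include <cstdint>
#include <set>
#include <vector>
struct SimpleEntry {
    int64_t fee{0};                          // modified fee
    int64_t vsize{0};                        // virtual size
    std::vector<const SimpleEntry*> parents; // in-mempool parents
};
// Total up count, vsize and fees over the ancestor set, inclusive of the entry.
void CalcAncestorData(const SimpleEntry& entry, std::size_t& count, int64_t& vsize, int64_t& fees)
{
    count = 0; vsize = 0; fees = 0;
    std::set<const SimpleEntry*> seen;
    std::vector<const SimpleEntry*> todo{&entry};
    while (!todo.empty()) {
        const SimpleEntry* cur = todo.back();
        todo.pop_back();
        if (!seen.insert(cur).second) continue;
        ++count; vsize += cur->vsize; fees += cur->fee;
        for (const SimpleEntry* p : cur->parents) todo.push_back(p);
    }
}
int main()
{
    SimpleEntry parent{/*fee=*/1'000, /*vsize=*/200, {}};
    SimpleEntry child{/*fee=*/10'000, /*vsize=*/150, {&parent}};
    std::size_t count{0};
    int64_t vsize{0}, fees{0};
    CalcAncestorData(child, count, vsize, fees);
    // The child's ancestor feerate would be 11'000 / 350.
    return (count == 2 && vsize == 350 && fees == 11'000) ? 0 : 1;
}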

View File

@ -39,35 +39,6 @@ static inline CTransactionRef make_tx(const std::vector<CTransactionRef>& inputs
// Make two child transactions from parent (which must have at least 2 outputs).
// Each tx will have the same outputs, using the amounts specified in output_values.
static inline std::pair<CTransactionRef, CTransactionRef> make_two_siblings(const CTransactionRef parent,
const std::vector<CAmount>& output_values)
{
assert(parent->vout.size() >= 2);
// First tx takes first parent output
CMutableTransaction tx1 = CMutableTransaction();
tx1.vin.resize(1);
tx1.vout.resize(output_values.size());
tx1.vin[0].prevout.hash = parent->GetHash();
tx1.vin[0].prevout.n = 0;
// Add a witness so wtxid != txid
CScriptWitness witness;
witness.stack.emplace_back(10);
tx1.vin[0].scriptWitness = witness;
for (size_t i = 0; i < output_values.size(); ++i) {
tx1.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
tx1.vout[i].nValue = output_values[i];
}
// Second tx takes second parent output
CMutableTransaction tx2 = tx1;
tx2.vin[0].prevout.n = 1;
return std::make_pair(MakeTransactionRef(tx1), MakeTransactionRef(tx2));
}
static CTransactionRef add_descendants(const CTransactionRef& tx, int32_t num_descendants, CTxMemPool& pool)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs)
{
@ -85,33 +56,6 @@ static CTransactionRef add_descendants(const CTransactionRef& tx, int32_t num_de
return tx_to_spend;
}
static CTransactionRef add_descendant_to_parents(const std::vector<CTransactionRef>& parents, CTxMemPool& pool)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs)
{
AssertLockHeld(::cs_main);
AssertLockHeld(pool.cs);
TestMemPoolEntryHelper entry;
// Assumes this isn't already spent in mempool
auto child_tx = make_tx(/*inputs=*/parents, /*output_values=*/{50 * CENT});
pool.addUnchecked(entry.FromTx(child_tx));
// Return last created tx
return child_tx;
}
// Makes two children for a single parent
static std::pair<CTransactionRef, CTransactionRef> add_children_to_parent(const CTransactionRef parent, CTxMemPool& pool)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs)
{
AssertLockHeld(::cs_main);
AssertLockHeld(pool.cs);
TestMemPoolEntryHelper entry;
// Assumes this isn't already spent in mempool
auto children_tx = make_two_siblings(/*parent=*/parent, /*output_values=*/{50 * CENT});
pool.addUnchecked(entry.FromTx(children_tx.first));
pool.addUnchecked(entry.FromTx(children_tx.second));
return children_tx;
}
BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup)
{
CTxMemPool& pool = *Assert(m_node.mempool);
@ -173,11 +117,6 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup)
const auto entry6_low_prioritised = pool.GetIter(tx6->GetHash()).value();
const auto entry7_high = pool.GetIter(tx7->GetHash()).value();
const auto entry8_high = pool.GetIter(tx8->GetHash()).value();
const auto entry9_unchained = pool.GetIter(tx9->GetHash()).value();
const auto entry10_unchained = pool.GetIter(tx10->GetHash()).value();
const auto entry11_unchained = pool.GetIter(tx11->GetHash()).value();
const auto entry12_unchained = pool.GetIter(tx12->GetHash()).value();
const auto entry13_unchained = pool.GetIter(tx13->GetHash()).value();
BOOST_CHECK_EQUAL(entry1_normal->GetFee(), normal_fee);
BOOST_CHECK_EQUAL(entry2_normal->GetFee(), normal_fee);
@ -198,23 +137,6 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup)
const auto unused_txid{GetRandHash()};
// Tests for PaysMoreThanConflicts
// These tests use feerate, not absolute fee.
BOOST_CHECK(PaysMoreThanConflicts(/*iters_conflicting=*/set_12_normal,
/*replacement_feerate=*/CFeeRate(entry1_normal->GetModifiedFee() + 1, entry1_normal->GetTxSize() + 2),
/*txid=*/unused_txid).has_value());
// Replacement must be strictly greater than the originals.
BOOST_CHECK(PaysMoreThanConflicts(set_12_normal, CFeeRate(entry1_normal->GetModifiedFee(), entry1_normal->GetTxSize()), unused_txid).has_value());
BOOST_CHECK(PaysMoreThanConflicts(set_12_normal, CFeeRate(entry1_normal->GetModifiedFee() + 1, entry1_normal->GetTxSize()), unused_txid) == std::nullopt);
// These tests use modified fees (including prioritisation), not base fees.
BOOST_CHECK(PaysMoreThanConflicts({entry5_low}, CFeeRate(entry5_low->GetModifiedFee() + 1, entry5_low->GetTxSize()), unused_txid) == std::nullopt);
BOOST_CHECK(PaysMoreThanConflicts({entry6_low_prioritised}, CFeeRate(entry6_low_prioritised->GetFee() + 1, entry6_low_prioritised->GetTxSize()), unused_txid).has_value());
BOOST_CHECK(PaysMoreThanConflicts({entry6_low_prioritised}, CFeeRate(entry6_low_prioritised->GetModifiedFee() + 1, entry6_low_prioritised->GetTxSize()), unused_txid) == std::nullopt);
// PaysMoreThanConflicts checks individual feerate, not ancestor feerate. This test compares
// replacement_feerate and entry4_high's feerate, which are the same. The replacement_feerate is
// considered too low even though entry4_high has a low ancestor feerate.
BOOST_CHECK(PaysMoreThanConflicts(set_34_cpfp, CFeeRate(entry4_high->GetModifiedFee(), entry4_high->GetTxSize()), unused_txid).has_value());
// Tests for EntriesAndTxidsDisjoint
BOOST_CHECK(EntriesAndTxidsDisjoint(empty_set, {tx1->GetHash()}, unused_txid) == std::nullopt);
BOOST_CHECK(EntriesAndTxidsDisjoint(set_12_normal, {tx3->GetHash()}, unused_txid) == std::nullopt);
@ -285,71 +207,12 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup)
BOOST_CHECK_EQUAL(all_conflicts.size(), 100);
all_conflicts.clear();
// Exceeds maximum number of conflicts.
// If we treat all conflicts as being direct conflicts, then we should exceed the replacement limit.
add_descendants(tx8, 1, pool);
BOOST_CHECK(GetEntriesForConflicts(*conflicts_with_parents.get(), pool, all_parents, all_conflicts).has_value());
// Tests for HasNoNewUnconfirmed
const auto spends_unconfirmed = make_tx({tx1}, {36 * CENT});
for (const auto& input : spends_unconfirmed->vin) {
// Spends unconfirmed inputs.
BOOST_CHECK(pool.exists(GenTxid::Txid(input.prevout.hash)));
}
BOOST_CHECK(HasNoNewUnconfirmed(/*tx=*/ *spends_unconfirmed.get(),
/*pool=*/ pool,
/*iters_conflicting=*/ all_entries) == std::nullopt);
BOOST_CHECK(HasNoNewUnconfirmed(*spends_unconfirmed.get(), pool, {entry2_normal}) == std::nullopt);
BOOST_CHECK(HasNoNewUnconfirmed(*spends_unconfirmed.get(), pool, empty_set).has_value());
const auto spends_new_unconfirmed = make_tx({tx1, tx8}, {36 * CENT});
BOOST_CHECK(HasNoNewUnconfirmed(*spends_new_unconfirmed.get(), pool, {entry2_normal}).has_value());
BOOST_CHECK(HasNoNewUnconfirmed(*spends_new_unconfirmed.get(), pool, all_entries).has_value());
const auto spends_conflicting_confirmed = make_tx({m_coinbase_txns[0], m_coinbase_txns[1]}, {45 * CENT});
BOOST_CHECK(HasNoNewUnconfirmed(*spends_conflicting_confirmed.get(), pool, {entry1_normal, entry3_low}) == std::nullopt);
// Tests for CheckConflictTopology
// Tx4 has 23 descendants
BOOST_CHECK_EQUAL(pool.CheckConflictTopology(set_34_cpfp).value(), strprintf("%s has 23 descendants, max 1 allowed", entry4_high->GetSharedTx()->GetHash().ToString()));
// No descendants yet
BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained}) == std::nullopt);
// Add 1 descendant, still ok
add_descendants(tx9, 1, pool);
BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained}) == std::nullopt);
// N direct conflicts; ok
BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained, entry10_unchained, entry11_unchained}) == std::nullopt);
// Add 1 descendant, still ok, even if it's considered a direct conflict as well
const auto child_tx = add_descendants(tx10, 1, pool);
const auto entry10_child = pool.GetIter(child_tx->GetHash()).value();
BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained, entry10_unchained, entry11_unchained}) == std::nullopt);
BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained, entry10_unchained, entry11_unchained, entry10_child}) == std::nullopt);
// One more, size 3 cluster too much
const auto grand_child_tx = add_descendants(child_tx, 1, pool);
const auto entry10_grand_child = pool.GetIter(grand_child_tx->GetHash()).value();
BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry9_unchained, entry10_unchained, entry11_unchained}).value(), strprintf("%s has 2 descendants, max 1 allowed", entry10_unchained->GetSharedTx()->GetHash().ToString()));
// even if the direct conflict is a descendant itself
BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry9_unchained, entry10_grand_child, entry11_unchained}).value(), strprintf("%s has 2 ancestors, max 1 allowed", entry10_grand_child->GetSharedTx()->GetHash().ToString()));
// Make a single child from two singleton parents
const auto two_parent_child_tx = add_descendant_to_parents({tx11, tx12}, pool);
const auto entry_two_parent_child = pool.GetIter(two_parent_child_tx->GetHash()).value();
BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry11_unchained}).value(), strprintf("%s is not the only parent of child %s", entry11_unchained->GetSharedTx()->GetHash().ToString(), entry_two_parent_child->GetSharedTx()->GetHash().ToString()));
BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry12_unchained}).value(), strprintf("%s is not the only parent of child %s", entry12_unchained->GetSharedTx()->GetHash().ToString(), entry_two_parent_child->GetSharedTx()->GetHash().ToString()));
BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry_two_parent_child}).value(), strprintf("%s has 2 ancestors, max 1 allowed", entry_two_parent_child->GetSharedTx()->GetHash().ToString()));
// Single parent with two children; we will conflict directly with the siblings only
const auto two_siblings = add_children_to_parent(tx13, pool);
const auto entry_sibling_1 = pool.GetIter(two_siblings.first->GetHash()).value();
const auto entry_sibling_2 = pool.GetIter(two_siblings.second->GetHash()).value();
BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry_sibling_1}).value(), strprintf("%s is not the only child of parent %s", entry_sibling_1->GetSharedTx()->GetHash().ToString(), entry13_unchained->GetSharedTx()->GetHash().ToString()));
BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry_sibling_2}).value(), strprintf("%s is not the only child of parent %s", entry_sibling_2->GetSharedTx()->GetHash().ToString(), entry13_unchained->GetSharedTx()->GetHash().ToString()));
BOOST_CHECK(GetEntriesForConflicts(*conflicts_with_parents.get(), pool, all_parents, all_conflicts) == std::nullopt);
BOOST_CHECK_EQUAL(all_conflicts.size(), 101);
CTxMemPool::setEntries dummy;
BOOST_CHECK(GetEntriesForConflicts(*conflicts_with_parents.get(), pool, all_conflicts, dummy).has_value());
}
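The reworked checks at the end of this test show the new accounting: only the directly conflicting transactions count against the replacement cap, while their descendants are still collected for eviction, which is why 101 evicted entries can pass here but feeding those same 101 entries back in as direct conflicts fails. A toy version of that bookkeeping, not the real GetEntriesForConflicts.
#include <cstddef>
#include <map>
#include <optional>
#include <set>
#include <string>
#include <vector>
// Gather every to-be-evicted transaction, but enforce the cap only on the
// number of *direct* conflicts; descendants are collected without counting.
std::optional<std::string> GetConflictEntries(
    const std::vector<int>& direct_conflicts,
    const std::map<int, std::vector<int>>& descendants,
    std::set<int>& all_conflicts,
    std::size_t max_direct_conflicts = 100)
{
    if (direct_conflicts.size() > max_direct_conflicts) {
        return "rejecting replacement: too many direct conflicts";
    }
    for (const int txid : direct_conflicts) {
        all_conflicts.insert(txid);
        const auto it = descendants.find(txid);
        if (it == descendants.end()) continue;
        all_conflicts.insert(it->second.begin(), it->second.end());
    }
    return std::nullopt;
}
int main()
{
    std::map<int, std::vector<int>> descendants{{1, {2, 3}}, {4, {}}};
    std::set<int> all_conflicts;
    // Two direct conflicts; four entries end up evicted, but only two count
    // toward the direct-conflict cap.
    const auto err = GetConflictEntries({1, 4}, descendants, all_conflicts);
    return (!err && all_conflicts.size() == 4) ? 0 : 1;
}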
BOOST_FIXTURE_TEST_CASE(improves_feerate, TestChain100Setup)
@ -362,48 +225,46 @@ BOOST_FIXTURE_TEST_CASE(improves_feerate, TestChain100Setup)
const CAmount normal_fee{CENT/10};
// low feerate parent with normal feerate child
const auto tx1 = make_tx(/*inputs=*/ {m_coinbase_txns[0]}, /*output_values=*/ {10 * COIN});
const auto tx1 = make_tx(/*inputs=*/ {m_coinbase_txns[0], m_coinbase_txns[1]}, /*output_values=*/ {10 * COIN});
pool.addUnchecked(entry.Fee(low_fee).FromTx(tx1));
const auto tx2 = make_tx(/*inputs=*/ {tx1}, /*output_values=*/ {995 * CENT});
pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx2));
const auto entry1 = pool.GetIter(tx1->GetHash()).value();
const auto tx1_fee = entry1->GetModifiedFee();
const auto tx1_size = entry1->GetTxSize();
const auto entry2 = pool.GetIter(tx2->GetHash()).value();
const auto tx2_fee = entry2->GetModifiedFee();
const auto tx2_size = entry2->GetTxSize();
const CAmount tx2_fee = entry2->GetModifiedFee();
// conflicting transactions
const auto tx1_conflict = make_tx(/*inputs=*/ {m_coinbase_txns[0], m_coinbase_txns[2]}, /*output_values=*/ {10 * COIN});
auto entry1_conflict = entry.FromTx(tx1_conflict);
const auto tx3 = make_tx(/*inputs=*/ {tx1_conflict}, /*output_values=*/ {995 * CENT});
auto entry3 = entry.FromTx(tx3);
// Now test ImprovesFeerateDiagram with various levels of "package rbf" feerates
// It doesn't improve itself
const auto res1 = ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, tx1_fee + tx2_fee, tx1_size + tx2_size);
// It doesn't improve "itself"
const auto res1 = ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, {{&entry1_conflict, tx1_fee}, {&entry3, tx2_fee}});
BOOST_CHECK(res1.has_value());
BOOST_CHECK(res1.value().first == DiagramCheckError::FAILURE);
BOOST_CHECK(res1.value().second == "insufficient feerate: does not improve feerate diagram");
// With one more satoshi it does
BOOST_CHECK(ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, tx1_fee + tx2_fee + 1, tx1_size + tx2_size) == std::nullopt);
BOOST_CHECK(ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, {{&entry1_conflict, tx1_fee+1}, {&entry3, tx2_fee}}) == std::nullopt);
// With prioritisation of in-mempool conflicts, it affects the results of the comparison using the same args as just above
pool.PrioritiseTransaction(entry1->GetSharedTx()->GetHash(), /*nFeeDelta=*/1);
const auto res2 = ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, tx1_fee + tx2_fee + 1, tx1_size + tx2_size);
const auto res2 = ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, {{&entry1_conflict, tx1_fee+1}, {&entry3, tx2_fee}});
BOOST_CHECK(res2.has_value());
BOOST_CHECK(res2.value().first == DiagramCheckError::FAILURE);
BOOST_CHECK(res2.value().second == "insufficient feerate: does not improve feerate diagram");
pool.PrioritiseTransaction(entry1->GetSharedTx()->GetHash(), /*nFeeDelta=*/-1);
// With one less vB it does
BOOST_CHECK(ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, tx1_fee + tx2_fee, tx1_size + tx2_size - 1) == std::nullopt);
// Adding a grandchild makes the cluster size 3, which is uncalculable
const auto tx3 = make_tx(/*inputs=*/ {tx2}, /*output_values=*/ {995 * CENT});
pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx3));
const auto res3 = ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, tx1_fee + tx2_fee + 1, tx1_size + tx2_size);
BOOST_CHECK(res3.has_value());
BOOST_CHECK(res3.value().first == DiagramCheckError::UNCALCULABLE);
BOOST_CHECK(res3.value().second == strprintf("%s has 2 descendants, max 1 allowed", tx1->GetHash().GetHex()));
// With fewer vbytes it does
CMutableTransaction tx4{entry3.GetTx()};
tx4.vin[0].scriptWitness = CScriptWitness(); // Clear out the witness, to reduce size
auto entry4 = entry.FromTx(MakeTransactionRef(tx4));
BOOST_CHECK(ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, {{&entry1_conflict, tx1_fee}, {&entry4, tx2_fee}}) == std::nullopt);
}
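ImprovesFeerateDiagram accepts a replacement only when the new set of chunks is at least as good as the old one at every point of the cumulative fee-versus-size diagram, and strictly better somewhere; that is why one extra satoshi, or the same fee in fewer vbytes, flips the verdict above. A simplified comparison over already-sorted (fee, size) chunk lists; this approximates the idea rather than reproducing the mempool's implementation.
#include <cstdint>
#include <vector>
struct Chunk { int64_t fee; int64_t size; };
// Cumulative fee of a chunk list evaluated at total size x (piecewise linear).
// Assumes the chunks are already ordered from highest to lowest feerate.
double DiagramAt(const std::vector<Chunk>& chunks, double x)
{
    double fee = 0.0, size = 0.0;
    for (const Chunk& c : chunks) {
        if (x <= size + c.size) return fee + c.fee * (x - size) / c.size;
        fee += c.fee;
        size += c.size;
    }
    return fee; // beyond the last chunk the diagram stays flat
}
// True if new_chunks is never below old_chunks and strictly above somewhere.
bool Improves(const std::vector<Chunk>& old_chunks, const std::vector<Chunk>& new_chunks)
{
    std::vector<double> points;
    double s = 0.0;
    for (const Chunk& c : old_chunks) points.push_back(s += c.size);
    s = 0.0;
    for (const Chunk& c : new_chunks) points.push_back(s += c.size);
    bool strictly_better = false;
    for (const double x : points) {
        const double o = DiagramAt(old_chunks, x);
        const double n = DiagramAt(new_chunks, x);
        if (n < o) return false;
        if (n > o) strictly_better = true;
    }
    return strictly_better;
}
int main()
{
    const std::vector<Chunk> old_chunks{{3'000, 300}}; // parent+child as one chunk
    const std::vector<Chunk> new_chunks{{3'001, 300}}; // same size, one more satoshi
    return Improves(old_chunks, new_chunks) ? 0 : 1;
}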
BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
@ -413,7 +274,6 @@ BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
TestMemPoolEntryHelper entry;
const CAmount low_fee{CENT/100};
const CAmount normal_fee{CENT/10};
const CAmount high_fee{CENT};
// low -> high -> medium fee transactions that would result in two chunks together since they
@ -424,23 +284,26 @@ BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
const auto entry_low = pool.GetIter(low_tx->GetHash()).value();
const auto low_size = entry_low->GetTxSize();
const auto replacement_tx = make_tx(/*inputs=*/ {m_coinbase_txns[0]}, /*output_values=*/ {10 * COIN});
auto entry_replacement = entry.FromTx(replacement_tx);
// Replacement of size 1
{
const auto replace_one{pool.CalculateChunksForRBF(/*replacement_fees=*/0, /*replacement_vsize=*/1, {entry_low}, {entry_low})};
const auto replace_one{pool.CalculateChunksForRBF({{&entry_replacement, 0}}, {entry_low}, {entry_low})};
BOOST_CHECK(replace_one.has_value());
std::vector<FeeFrac> expected_old_chunks{{low_fee, low_size}};
BOOST_CHECK(replace_one->first == expected_old_chunks);
std::vector<FeeFrac> expected_new_chunks{{0, 1}};
std::vector<FeeFrac> expected_new_chunks{{0, entry_replacement.GetTxSize()}};
BOOST_CHECK(replace_one->second == expected_new_chunks);
}
// Non-zero replacement fee/size
{
const auto replace_one_fee{pool.CalculateChunksForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {entry_low}, {entry_low})};
const auto replace_one_fee{pool.CalculateChunksForRBF({{&entry_replacement, high_fee}}, {entry_low}, {entry_low})};
BOOST_CHECK(replace_one_fee.has_value());
std::vector<FeeFrac> expected_old_diagram{{low_fee, low_size}};
BOOST_CHECK(replace_one_fee->first == expected_old_diagram);
std::vector<FeeFrac> expected_new_diagram{{high_fee, low_size}};
std::vector<FeeFrac> expected_new_diagram{{high_fee, entry_replacement.GetTxSize()}};
BOOST_CHECK(replace_one_fee->second == expected_new_diagram);
}
@ -451,36 +314,24 @@ BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
const auto high_size = entry_high->GetTxSize();
{
const auto replace_single_chunk{pool.CalculateChunksForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {entry_low}, {entry_low, entry_high})};
const auto replace_single_chunk{pool.CalculateChunksForRBF({{&entry_replacement, high_fee}}, {entry_low}, {entry_low, entry_high})};
BOOST_CHECK(replace_single_chunk.has_value());
std::vector<FeeFrac> expected_old_chunks{{low_fee + high_fee, low_size + high_size}};
BOOST_CHECK(replace_single_chunk->first == expected_old_chunks);
std::vector<FeeFrac> expected_new_chunks{{high_fee, low_size}};
std::vector<FeeFrac> expected_new_chunks{{high_fee, entry_replacement.GetTxSize()}};
BOOST_CHECK(replace_single_chunk->second == expected_new_chunks);
}
// Conflict with the 2nd tx, resulting in new diagram with three entries
{
const auto replace_cpfp_child{pool.CalculateChunksForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {entry_high}, {entry_high})};
const auto replace_cpfp_child{pool.CalculateChunksForRBF({{&entry_replacement, high_fee}}, {entry_high}, {entry_high})};
BOOST_CHECK(replace_cpfp_child.has_value());
std::vector<FeeFrac> expected_old_chunks{{low_fee + high_fee, low_size + high_size}};
BOOST_CHECK(replace_cpfp_child->first == expected_old_chunks);
std::vector<FeeFrac> expected_new_chunks{{high_fee, low_size}, {low_fee, low_size}};
std::vector<FeeFrac> expected_new_chunks{{high_fee, entry_replacement.GetTxSize()}, {low_fee, low_size}};
BOOST_CHECK(replace_cpfp_child->second == expected_new_chunks);
}
// third transaction causes the topology check to fail
const auto normal_tx = make_tx(/*inputs=*/ {high_tx}, /*output_values=*/ {995 * CENT});
pool.addUnchecked(entry.Fee(normal_fee).FromTx(normal_tx));
const auto entry_normal = pool.GetIter(normal_tx->GetHash()).value();
const auto normal_size = entry_normal->GetTxSize();
{
const auto replace_too_large{pool.CalculateChunksForRBF(/*replacement_fees=*/normal_fee, /*replacement_vsize=*/normal_size, {entry_low}, {entry_low, entry_high, entry_normal})};
BOOST_CHECK(!replace_too_large.has_value());
BOOST_CHECK_EQUAL(util::ErrorString(replace_too_large).original, strprintf("%s has 2 descendants, max 1 allowed", low_tx->GetHash().GetHex()));
}
// Make a size 2 cluster that is itself two chunks; evict both txns
const auto high_tx_2 = make_tx(/*inputs=*/ {m_coinbase_txns[1]}, /*output_values=*/ {10 * COIN});
pool.addUnchecked(entry.Fee(high_fee).FromTx(high_tx_2));
@ -493,7 +344,7 @@ BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
const auto low_size_2 = entry_low_2->GetTxSize();
{
const auto replace_two_chunks_single_cluster{pool.CalculateChunksForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {entry_high_2}, {entry_high_2, entry_low_2})};
const auto replace_two_chunks_single_cluster{pool.CalculateChunksForRBF({{&entry_replacement, high_fee}}, {entry_high_2}, {entry_high_2, entry_low_2})};
BOOST_CHECK(replace_two_chunks_single_cluster.has_value());
std::vector<FeeFrac> expected_old_chunks{{high_fee, high_size_2}, {low_fee, low_size_2}};
BOOST_CHECK(replace_two_chunks_single_cluster->first == expected_old_chunks);
@ -501,7 +352,7 @@ BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
BOOST_CHECK(replace_two_chunks_single_cluster->second == expected_new_chunks);
}
// You can have more than two direct conflicts if there are multiple affected clusters, all of size 2 or less
// You can have more than two direct conflicts
const auto conflict_1 = make_tx(/*inputs=*/ {m_coinbase_txns[2]}, /*output_values=*/ {10 * COIN});
pool.addUnchecked(entry.Fee(low_fee).FromTx(conflict_1));
const auto conflict_1_entry = pool.GetIter(conflict_1->GetHash()).value();
@ -515,7 +366,7 @@ BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
const auto conflict_3_entry = pool.GetIter(conflict_3->GetHash()).value();
{
const auto replace_multiple_clusters{pool.CalculateChunksForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {conflict_1_entry, conflict_2_entry, conflict_3_entry}, {conflict_1_entry, conflict_2_entry, conflict_3_entry})};
const auto replace_multiple_clusters{pool.CalculateChunksForRBF({{&entry_replacement, high_fee}}, {conflict_1_entry, conflict_2_entry, conflict_3_entry}, {conflict_1_entry, conflict_2_entry, conflict_3_entry})};
BOOST_CHECK(replace_multiple_clusters.has_value());
BOOST_CHECK(replace_multiple_clusters->first.size() == 3);
BOOST_CHECK(replace_multiple_clusters->second.size() == 1);
@ -527,24 +378,12 @@ BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
const auto conflict_1_child_entry = pool.GetIter(conflict_1_child->GetHash()).value();
{
const auto replace_multiple_clusters_2{pool.CalculateChunksForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {conflict_1_entry, conflict_2_entry, conflict_3_entry}, {conflict_1_entry, conflict_2_entry, conflict_3_entry, conflict_1_child_entry})};
const auto replace_multiple_clusters_2{pool.CalculateChunksForRBF({{&entry_replacement, high_fee}}, {conflict_1_entry, conflict_2_entry, conflict_3_entry}, {conflict_1_entry, conflict_2_entry, conflict_3_entry, conflict_1_child_entry})};
BOOST_CHECK(replace_multiple_clusters_2.has_value());
BOOST_CHECK(replace_multiple_clusters_2->first.size() == 4);
BOOST_CHECK(replace_multiple_clusters_2->second.size() == 1);
}
// Add another descendant to conflict_1, making the cluster size > 2 should fail at this point.
const auto conflict_1_grand_child = make_tx(/*inputs=*/{conflict_1_child}, /*output_values=*/ {995 * CENT});
pool.addUnchecked(entry.Fee(high_fee).FromTx(conflict_1_grand_child));
const auto conflict_1_grand_child_entry = pool.GetIter(conflict_1_child->GetHash()).value();
{
const auto replace_cluster_size_3{pool.CalculateChunksForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {conflict_1_entry, conflict_2_entry, conflict_3_entry}, {conflict_1_entry, conflict_2_entry, conflict_3_entry, conflict_1_child_entry, conflict_1_grand_child_entry})};
BOOST_CHECK(!replace_cluster_size_3.has_value());
BOOST_CHECK_EQUAL(util::ErrorString(replace_cluster_size_3).original, strprintf("%s has 2 descendants, max 1 allowed", conflict_1->GetHash().GetHex()));
}
}
BOOST_AUTO_TEST_CASE(feerate_chunks_utilities)

src/test/txgraph_tests.cpp Normal file (409 lines)
View File

@ -0,0 +1,409 @@
// Copyright (c) 2024 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <kernel/txgraph.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
#include <random.h>
BOOST_AUTO_TEST_SUITE(txgraph_tests)
BOOST_AUTO_TEST_CASE(TxEntryTest)
{
TxEntry entry(100, 100);
BOOST_CHECK_EQUAL(entry.GetTxEntryParents().size(), 0);
BOOST_CHECK_EQUAL(entry.GetTxEntryChildren().size(), 0);
BOOST_CHECK_EQUAL(entry.GetTxSize(), 100);
BOOST_CHECK_EQUAL(entry.GetModifiedFee(), 100);
BOOST_CHECK(entry.m_cluster == nullptr);
TxEntry entry2(200, 200);
BOOST_CHECK_EQUAL(entry2.GetTxSize(), 200);
BOOST_CHECK_EQUAL(entry2.GetModifiedFee(), 200);
// If CompareById changes, then these tests may need to be changed as well.
BOOST_CHECK(TxEntry::CompareById()(entry, entry2));
BOOST_CHECK(!TxEntry::CompareById()(entry2, entry));
entry2.GetTxEntryParents().insert(entry);
entry.GetTxEntryChildren().insert(entry2);
BOOST_CHECK_EQUAL(entry.GetTxEntryChildren().size(), 1);
BOOST_CHECK_EQUAL(entry2.GetTxEntryParents().size(), 1);
}
BOOST_AUTO_TEST_CASE(TxGraphClusterAddRemoveTest)
{
TxGraph dummy;
TxGraphCluster cluster(1, &dummy);
TxEntry entry1(100, 100);
entry1.m_cluster = &cluster;
cluster.AddTransaction(entry1, true);
BOOST_CHECK_EQUAL(entry1.m_cluster, &cluster);
BOOST_CHECK_EQUAL(entry1.m_loc.first, 0); // must be first chunk
BOOST_CHECK_EQUAL(entry1.m_loc.second->get().unique_id, entry1.unique_id); // must be the first entry in that chunk
BOOST_CHECK_EQUAL(cluster.m_chunks.size(), 1);
BOOST_CHECK_EQUAL(cluster.m_tx_count, 1);
BOOST_CHECK_EQUAL(cluster.m_tx_size, 100);
BOOST_CHECK(cluster.GetMemoryUsage() > 0);
TxEntry entry2(1, 500);
entry1.GetTxEntryChildren().insert(entry2);
entry2.GetTxEntryParents().insert(entry1);
entry2.m_cluster = &cluster;
cluster.AddTransaction(entry2, true);
BOOST_CHECK_EQUAL(entry2.m_cluster, &cluster);
BOOST_CHECK_EQUAL(cluster.m_tx_count, 2);
BOOST_CHECK_EQUAL(cluster.m_tx_size, 101);
// Check that the topology is respected and entry1 is before entry2
BOOST_CHECK(entry1.m_loc.first == 0);
BOOST_CHECK(entry2.m_loc.first >= entry1.m_loc.first);
if (entry2.m_loc.first == entry1.m_loc.first) {
// If they are in the same chunk, then entry1 must be before entry2
BOOST_CHECK(entry1.m_loc.second == cluster.m_chunks[0].txs.begin());
}
// Removing tx1 should make tx2 best.
entry2.GetTxEntryParents().erase(entry1);
entry1.GetTxEntryChildren().erase(entry2);
cluster.RemoveTransaction(entry1);
cluster.Sort();
BOOST_CHECK_EQUAL(entry2.m_cluster, &cluster);
BOOST_CHECK_EQUAL(entry2.m_loc.first, 0);
BOOST_CHECK(entry2.m_loc.second == cluster.m_chunks[0].txs.begin());
BOOST_CHECK_EQUAL(cluster.m_tx_count, 1);
BOOST_CHECK_EQUAL(cluster.m_tx_size, 1);
BOOST_CHECK(cluster.GetMemoryUsage() > 0);
}
BOOST_AUTO_TEST_CASE(TxGraphClusterRechunkTest)
{
// Test that rechunking doesn't change the transaction order.
TxGraph dummy;
TxGraphCluster cluster(1, &dummy);
// Add some transactions in a silly order.
TxEntry entry1(100, 100);
entry1.m_cluster = &cluster;
cluster.AddTransaction(entry1, true);
TxEntry entry2(1, 500);
entry2.m_cluster = &cluster;
cluster.AddTransaction(entry2, false);
TxEntry entry3(200, 100);
entry3.m_cluster = &cluster;
cluster.AddTransaction(entry3, false);
std::vector<TxEntry::TxEntryRef> expected;
expected.emplace_back(entry1);
expected.emplace_back(entry2);
expected.emplace_back(entry3);
// Check that the current order is entry1, entry2, entry3
std::vector<TxEntry::TxEntryRef> linearized;
for (auto &chunk : cluster.m_chunks) {
for (auto tx : chunk.txs) {
linearized.push_back(tx);
}
}
BOOST_CHECK(linearized.size() == 3);
for (size_t i=0; i <linearized.size(); ++i) {
BOOST_CHECK(&linearized[i].get() == &expected[i].get());
}
cluster.Rechunk();
std::vector<TxEntry::TxEntryRef> linearized2;
for (auto &chunk : cluster.m_chunks) {
for (auto tx : chunk.txs) {
linearized2.push_back(tx);
}
}
BOOST_CHECK(linearized2.size() == 3);
for (size_t i=0; i <linearized2.size(); ++i) {
BOOST_CHECK(&linearized2[i].get() == &expected[i].get());
}
}
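Rechunk() regroups a fixed transaction order into chunks whose feerates never increase from one chunk to the next: whenever the newest chunk has a higher feerate than the one before it, the two are merged. A sketch of that standard chunking pass over (fee, size) pairs, assuming this is the rule the cluster follows; it is not the TxGraphCluster code itself.
#include <cassert>
#include <cstdint>
#include <vector>
struct ChunkData { int64_t fee; int64_t size; };
// Collapse a fixed order of transactions into chunks with non-increasing feerates.
std::vector<ChunkData> BuildChunks(const std::vector<ChunkData>& txs)
{
    std::vector<ChunkData> chunks;
    for (const ChunkData& tx : txs) {
        chunks.push_back(tx);
        while (chunks.size() >= 2) {
            const ChunkData& prev = chunks[chunks.size() - 2];
            const ChunkData& last = chunks.back();
            // Merge while last.fee/last.size > prev.fee/prev.size (no division).
            if (last.fee * prev.size > prev.fee * last.size) {
                const ChunkData merged{prev.fee + last.fee, prev.size + last.size};
                chunks.pop_back();
                chunks.back() = merged;
            } else {
                break;
            }
        }
    }
    return chunks;
}
int main()
{
    // A low-feerate parent followed by a high-feerate child collapses into one chunk.
    const auto chunks = BuildChunks({{100, 100}, {500, 1}});
    assert(chunks.size() == 1 && chunks[0].fee == 600 && chunks[0].size == 101);
}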
BOOST_AUTO_TEST_CASE(TxGraphClusterMergeTest)
{
TxGraph dummy;
TxGraphCluster cluster1(1, &dummy);
TxGraphCluster cluster2(2, &dummy);
TxGraphCluster cluster3(3, &dummy);
std::vector<TxEntry> all_entries;
all_entries.reserve(30);
// Add some random transactions to each cluster.
for (int i=0; i < 30; ++i) {
all_entries.emplace_back(GetRand(1000)+1, GetRand(1000)+1);
if (i < 10) {
all_entries.back().m_cluster = &cluster1;
cluster1.AddTransaction(all_entries.back(), true);
} else if (i < 20) {
all_entries.back().m_cluster = &cluster2;
cluster2.AddTransaction(all_entries.back(), true);
} else {
all_entries.back().m_cluster = &cluster3;
cluster3.AddTransaction(all_entries.back(), true);
}
}
std::vector<TxEntry::TxEntryRef> linearized;
for (auto &chunk : cluster1.m_chunks) {
for (auto tx : chunk.txs) {
linearized.push_back(tx);
}
}
// Check that the ordering of transactions within each cluster is preserved
// under the merge.
std::vector<TxGraphCluster*> clusters = {&cluster2, &cluster3};
cluster1.Merge(clusters.begin(), clusters.end(), false);
std::vector<TxEntry::TxEntryRef> linearized2;
for (auto &chunk : cluster1.m_chunks) {
for (auto tx : chunk.txs) {
linearized2.push_back(tx);
}
}
std::vector<TxEntry::TxEntryRef>::iterator it1, it2;
it1 = linearized.begin();
it2 = linearized2.begin();
while (it1 != linearized.end()) {
BOOST_CHECK(it2 != linearized2.end());
if (&(it1->get()) == &(it2->get())) {
++it1;
++it2;
} else {
++it2;
}
}
// Check that GetLastTransaction returns the correct item
BOOST_CHECK(&(cluster1.GetLastTransaction().get()) == &linearized2.back().get());
}
BOOST_AUTO_TEST_CASE(TxGraphClusterSortTest)
{
// Verify that parents always come before children, no matter what we stick
// in a cluster.
TxGraph dummy;
TxGraphCluster cluster(1, &dummy);
std::vector<TxEntry> all_entries;
all_entries.reserve(30);
// Create some random transactions.
for (int i=0; i < 30; ++i) {
all_entries.emplace_back(GetRand(1000)+1, GetRand(1000)+1);
for (int j=0; j<i; ++j) {
if (GetRand(4) == 0) {
all_entries[i].GetTxEntryParents().insert(all_entries[j]);
all_entries[j].GetTxEntryChildren().insert(all_entries[i]);
}
}
all_entries.back().m_cluster = &cluster;
cluster.AddTransaction(all_entries.back(), false);
}
cluster.Sort(true); // Ensure that we're invoking the Sort() call.
BOOST_CHECK(cluster.Check());
}
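cluster.Check() is relied on here to confirm that the sorted linearization keeps every parent ahead of its children. A standalone illustration of that topological property check, independent of the real cluster internals.
#include <cassert>
#include <cstddef>
#include <map>
#include <vector>
// A linearization is topologically valid if every parent occurs before each of
// its children; parents[child] lists the ids of that child's parents.
bool TopoValid(const std::vector<int>& order, const std::map<int, std::vector<int>>& parents)
{
    std::map<int, std::size_t> pos;
    for (std::size_t i = 0; i < order.size(); ++i) pos[order[i]] = i;
    for (const auto& [child, ps] : parents) {
        for (const int p : ps) {
            if (pos.at(p) >= pos.at(child)) return false;
        }
    }
    return true;
}
int main()
{
    const std::map<int, std::vector<int>> parents{{2, {1}}, {3, {1, 2}}};
    assert(TopoValid({1, 2, 3}, parents));
    assert(!TopoValid({2, 1, 3}, parents));
}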
BOOST_AUTO_TEST_CASE(TxGraphTest)
{
TxGraph txgraph;
GraphLimits limits{1'000'000'000, 1'000'000'000};
std::vector<TxEntry*> all_entries;
std::set<int64_t> in_mempool;
// Test that the TxGraph stays consistent as we mix and match operations.
for (int i=0; i<1000; ++i) {
// Randomly choose an operation to perform.
// 1. Add a loose transaction
// 2. Remove a transaction and all its descendants.
// 3. Remove a set of transactions for a block.
// 4. Trim the worst chunk.
// 5. Add back a set of confirmed transactions.
int rand_val = GetRand(100);
if (rand_val < 85) {
// Add a random transaction, 85% of the time.
TxEntry *entry = new TxEntry(GetRand(1000)+1, GetRand(1000)+1);
all_entries.emplace_back(entry);
std::vector<TxEntry::TxEntryRef> parents;
for (size_t j=0; j<all_entries.size(); ++j) {
if (GetRand(100) < 1 && in_mempool.count(all_entries[j]->unique_id)) {
parents.emplace_back(*all_entries[j]);
}
}
txgraph.AddTx(entry, entry->GetTxSize(), entry->GetModifiedFee(), parents);
in_mempool.insert(entry->unique_id);
} else if (rand_val < 90 && in_mempool.size() > 0) {
// RBF a transaction, 5% of the time.
int64_t random_index = GetRand(all_entries.size());
if (in_mempool.count(all_entries[random_index]->unique_id)) {
TxEntry *entry = all_entries[random_index];
std::vector<TxEntry::TxEntryRef> all_conflicts = txgraph.GetDescendants({*entry});
GraphLimits limits{100, 1000000};
TxGraphChangeSet changeset(&txgraph, limits, all_conflicts);
TxEntry *new_entry = new TxEntry(GetRand(1000)+1, GetRand(1000)+1);
all_entries.emplace_back(new_entry);
std::vector<TxEntry::TxEntryRef> parents;
for (size_t j=0; j<all_entries.size(); ++j) {
bool conflict{false};
for (auto &c: all_conflicts) {
if (c.get().unique_id == all_entries[j]->unique_id) {
conflict = true;
break;
}
}
if (conflict) continue;
if (GetRand(100) < 1 && in_mempool.count(all_entries[j]->unique_id)) {
parents.emplace_back(*all_entries[j]);
}
}
if (changeset.AddTx(*new_entry, parents)) {
std::vector<FeeFrac> diagram_dummy;
changeset.GetFeerateDiagramOld(diagram_dummy);
changeset.GetFeerateDiagramNew(diagram_dummy);
// Should do a diagram comparison here, but just apply 1/2 the time.
if (GetRand(100) < 50) {
in_mempool.insert(new_entry->unique_id);
for (auto &c: all_conflicts) {
in_mempool.erase(c.get().unique_id);
}
changeset.Apply();
}
}
}
} else if (rand_val < 95 && in_mempool.size() > 0) {
// Remove a random transaction and its descendants, 5% of the time.
int64_t random_index = GetRand(all_entries.size());
if (in_mempool.count(all_entries[random_index]->unique_id)) {
std::vector<TxEntry::TxEntryRef> to_remove = txgraph.GetDescendants({*all_entries[random_index]});
txgraph.RemoveBatch(to_remove);
for (size_t k=0; k<to_remove.size(); ++k) {
in_mempool.erase(to_remove[k].get().unique_id);
}
}
} else if (rand_val < 96 && in_mempool.size() > 0) {
// Mine a "block", of 5% of the transactions.
uint64_t num_to_remove = GetRand(in_mempool.size()+1) / 20;
std::set<TxEntry::TxEntryRef, TxEntry::CompareById> selected_transactions;
while (selected_transactions.size() < num_to_remove) {
int64_t random_index = GetRand(all_entries.size());
if (in_mempool.count(all_entries[random_index]->unique_id)) {
std::vector<TxEntry::TxEntryRef> to_mine = txgraph.GetAncestors({*all_entries[random_index]});
selected_transactions.insert(to_mine.begin(), to_mine.end());
}
}
if (selected_transactions.size() > 0) {
std::vector<TxEntry::TxEntryRef> selected_transactions_vec(selected_transactions.begin(), selected_transactions.end());
txgraph.RemoveBatch(selected_transactions_vec);
for (size_t k=0; k<selected_transactions_vec.size(); ++k) {
in_mempool.erase(selected_transactions_vec[k].get().unique_id);
}
}
} else if (rand_val < 98 && in_mempool.size() > 0) {
// Test the TxSelector (mining code).
TxSelector txselector(&txgraph);
int64_t num_invocations = GetRand(in_mempool.size()+1) / 20;
std::vector<TxEntry::TxEntryRef> selected_transactions;
while (num_invocations > 0) {
txselector.SelectNextChunk(selected_transactions);
// TODO: add a check that the feerates never go up as we make
// further calls.
txselector.Success();
--num_invocations;
}
txgraph.RemoveBatch(selected_transactions);
for (size_t k=0; k<selected_transactions.size(); ++k) {
in_mempool.erase(selected_transactions[k].get().unique_id);
}
// Check that the selected transactions are topologically valid.
// Do this by dumping into a cluster, and running Cluster::Check
TxGraphCluster dummy(-1, &txgraph);
for (auto& tx : selected_transactions) {
tx.get().m_cluster = &dummy;
dummy.AddTransaction(tx, false);
}
dummy.Rechunk();
dummy.Check();
} else if (rand_val < 99 && in_mempool.size() > 0) {
// Reorg a block with probability 1%
// Generate some random transactions and pick some existing random
// transactions to have as children.
int64_t num_to_add = GetRand(20)+1;
std::map<int64_t, std::vector<TxEntry::TxEntryRef>> children_map;
std::vector<TxEntry::TxEntryRef> new_transactions;
for (int k=0; k<num_to_add; ++k) {
all_entries.emplace_back(new TxEntry(GetRand(1000)+1, GetRand(1000)+1));
new_transactions.emplace_back(*all_entries.back());
children_map.insert(std::make_pair(all_entries.back()->unique_id, std::vector<TxEntry::TxEntryRef>()));
for (auto &id : in_mempool) {
if (GetRand(100) < 10) {
// Take advantage of the fact that the unique id's for
// each transaction correspond to the index in
// all_entries
for (auto entry : all_entries) {
if (entry->unique_id == id) {
children_map[all_entries.back()->unique_id].emplace_back(*entry);
break;
}
}
}
}
// Pick some random parents, amongst the set of new
// transactions.
std::vector<TxEntry::TxEntryRef> parents;
for (int m=0; m<k; ++m) {
if (GetRand(100) < 30) {
parents.emplace_back(new_transactions[m]);
}
}
txgraph.AddTx(all_entries.back(), all_entries.back()->GetTxSize(), all_entries.back()->GetModifiedFee(), parents);
}
std::vector<TxEntry::TxEntryRef> removed;
txgraph.AddParentTxs(new_transactions, limits, [&](const TxEntry& tx) { return children_map[tx.unique_id]; }, removed);
BOOST_CHECK(removed.size() == 0); // no limits should be hit
} else if (in_mempool.size() > 0) {
// Trim the worst chunk with probability 1%
Trimmer trimmer(&txgraph);
std::vector<TxEntry::TxEntryRef> removed;
auto feerate = trimmer.RemoveWorstChunk(removed);
// Check that the feerate matches what was removed
CAmount total_fee{0};
int32_t total_size{0};
for (auto &entry : removed) {
total_fee += entry.get().GetModifiedFee();
total_size += entry.get().GetTxSize();
in_mempool.erase(entry.get().unique_id);
}
BOOST_CHECK(feerate == CFeeRate(total_fee, total_size));
}
txgraph.Check(limits);
}
for (auto txentry : all_entries) {
delete txentry;
}
all_entries.clear();
}
BOOST_AUTO_TEST_SUITE_END()

View File

@ -96,14 +96,12 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
LOCK2(cs_main, pool.cs);
TestMemPoolEntryHelper entry;
std::set<Txid> empty_conflicts_set;
CTxMemPool::setEntries empty_ancestors;
CTxMemPool::Entries empty_ancestors;
auto mempool_tx_v3 = make_tx(random_outpoints(1), /*version=*/3);
pool.addUnchecked(entry.FromTx(mempool_tx_v3));
auto mempool_tx_v2 = make_tx(random_outpoints(1), /*version=*/2);
pool.addUnchecked(entry.FromTx(mempool_tx_v2));
// Default values.
CTxMemPool::Limits m_limits{};
// Cannot spend from an unconfirmed v3 transaction unless this tx is also v3.
{
@ -111,33 +109,33 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
// ^
// tx_v2_from_v3
auto tx_v2_from_v3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}}, /*version=*/2);
auto ancestors_v2_from_v3{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v3), m_limits)};
auto ancestors_v2_from_v3{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v3))};
const auto expected_error_str{strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)",
tx_v2_from_v3->GetHash().ToString(), tx_v2_from_v3->GetWitnessHash().ToString(),
mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())};
auto result_v2_from_v3{SingleV3Checks(tx_v2_from_v3, *ancestors_v2_from_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v3))};
auto result_v2_from_v3{SingleV3Checks(pool, tx_v2_from_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v3))};
BOOST_CHECK_EQUAL(result_v2_from_v3->first, expected_error_str);
BOOST_CHECK_EQUAL(result_v2_from_v3->second, nullptr);
Package package_v3_v2{mempool_tx_v3, tx_v2_from_v3};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), package_v3_v2, empty_ancestors), expected_error_str);
CTxMemPool::setEntries entries_mempool_v3{pool.GetIter(mempool_tx_v3->GetHash().ToUint256()).value()};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), {tx_v2_from_v3}, entries_mempool_v3), expected_error_str);
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), package_v3_v2, empty_ancestors), expected_error_str);
CTxMemPool::Entries entries_mempool_v3{pool.GetIter(mempool_tx_v3->GetHash().ToUint256()).value()};
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v2_from_v3, GetVirtualTransactionSize(*tx_v2_from_v3), {tx_v2_from_v3}, entries_mempool_v3), expected_error_str);
// mempool_tx_v3 mempool_tx_v2
// ^ ^
// tx_v2_from_v2_and_v3
auto tx_v2_from_v2_and_v3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}, COutPoint{mempool_tx_v2->GetHash(), 0}}, /*version=*/2);
auto ancestors_v2_from_both{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v2_and_v3), m_limits)};
auto ancestors_v2_from_both{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v2_and_v3))};
const auto expected_error_str_2{strprintf("non-v3 tx %s (wtxid=%s) cannot spend from v3 tx %s (wtxid=%s)",
tx_v2_from_v2_and_v3->GetHash().ToString(), tx_v2_from_v2_and_v3->GetWitnessHash().ToString(),
mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())};
auto result_v2_from_both{SingleV3Checks(tx_v2_from_v2_and_v3, *ancestors_v2_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3))};
auto result_v2_from_both{SingleV3Checks(pool, tx_v2_from_v2_and_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3))};
BOOST_CHECK_EQUAL(result_v2_from_both->first, expected_error_str_2);
BOOST_CHECK_EQUAL(result_v2_from_both->second, nullptr);
Package package_v3_v2_v2{mempool_tx_v3, mempool_tx_v2, tx_v2_from_v2_and_v3};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v2_from_v2_and_v3, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3), package_v3_v2_v2, empty_ancestors), expected_error_str_2);
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v2_from_v2_and_v3, GetVirtualTransactionSize(*tx_v2_from_v2_and_v3), package_v3_v2_v2, empty_ancestors), expected_error_str_2);
}
// V3 cannot spend from an unconfirmed non-v3 transaction.
@ -146,28 +144,28 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
// ^
// tx_v3_from_v2
auto tx_v3_from_v2 = make_tx({COutPoint{mempool_tx_v2->GetHash(), 0}}, /*version=*/3);
auto ancestors_v3_from_v2{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v2), m_limits)};
auto ancestors_v3_from_v2{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v2))};
const auto expected_error_str{strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)",
tx_v3_from_v2->GetHash().ToString(), tx_v3_from_v2->GetWitnessHash().ToString(),
mempool_tx_v2->GetHash().ToString(), mempool_tx_v2->GetWitnessHash().ToString())};
auto result_v3_from_v2{SingleV3Checks(tx_v3_from_v2, *ancestors_v3_from_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2))};
auto result_v3_from_v2{SingleV3Checks(pool, tx_v3_from_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2))};
BOOST_CHECK_EQUAL(result_v3_from_v2->first, expected_error_str);
BOOST_CHECK_EQUAL(result_v3_from_v2->second, nullptr);
Package package_v2_v3{mempool_tx_v2, tx_v3_from_v2};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), package_v2_v3, empty_ancestors), expected_error_str);
CTxMemPool::setEntries entries_mempool_v2{pool.GetIter(mempool_tx_v2->GetHash().ToUint256()).value()};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), {tx_v3_from_v2}, entries_mempool_v2), expected_error_str);
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), package_v2_v3, empty_ancestors), expected_error_str);
CTxMemPool::Entries entries_mempool_v2{pool.GetIter(mempool_tx_v2->GetHash().ToUint256()).value()};
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v3_from_v2, GetVirtualTransactionSize(*tx_v3_from_v2), {tx_v3_from_v2}, entries_mempool_v2), expected_error_str);
// mempool_tx_v3 mempool_tx_v2
// ^ ^
// tx_v3_from_v2_and_v3
auto tx_v3_from_v2_and_v3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}, COutPoint{mempool_tx_v2->GetHash(), 0}}, /*version=*/3);
auto ancestors_v3_from_both{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v2_and_v3), m_limits)};
auto ancestors_v3_from_both{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v2_and_v3))};
const auto expected_error_str_2{strprintf("v3 tx %s (wtxid=%s) cannot spend from non-v3 tx %s (wtxid=%s)",
tx_v3_from_v2_and_v3->GetHash().ToString(), tx_v3_from_v2_and_v3->GetWitnessHash().ToString(),
mempool_tx_v2->GetHash().ToString(), mempool_tx_v2->GetWitnessHash().ToString())};
auto result_v3_from_both{SingleV3Checks(tx_v3_from_v2_and_v3, *ancestors_v3_from_both, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3))};
auto result_v3_from_both{SingleV3Checks(pool, tx_v3_from_v2_and_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3))};
BOOST_CHECK_EQUAL(result_v3_from_both->first, expected_error_str_2);
BOOST_CHECK_EQUAL(result_v3_from_both->second, nullptr);
@ -175,7 +173,7 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
const auto expected_error_str_3{strprintf("tx %s (wtxid=%s) would have too many ancestors",
tx_v3_from_v2_and_v3->GetHash().ToString(), tx_v3_from_v2_and_v3->GetWitnessHash().ToString())};
Package package_v3_v2_v3{mempool_tx_v3, mempool_tx_v2, tx_v3_from_v2_and_v3};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_from_v2_and_v3, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3), package_v3_v2_v3, empty_ancestors), expected_error_str_3);
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v3_from_v2_and_v3, GetVirtualTransactionSize(*tx_v3_from_v2_and_v3), package_v3_v2_v3, empty_ancestors), expected_error_str_3);
}
// V3 from V3 is ok, and non-V3 from non-V3 is ok.
{
@ -183,23 +181,23 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
// ^
// tx_v3_from_v3
auto tx_v3_from_v3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}}, /*version=*/3);
auto ancestors_v3{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v3), m_limits)};
BOOST_CHECK(SingleV3Checks(tx_v3_from_v3, *ancestors_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v3))
auto ancestors_v3{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_from_v3))};
BOOST_CHECK(SingleV3Checks(pool, tx_v3_from_v3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_from_v3))
== std::nullopt);
Package package_v3_v3{mempool_tx_v3, tx_v3_from_v3};
BOOST_CHECK(PackageV3Checks(tx_v3_from_v3, GetVirtualTransactionSize(*tx_v3_from_v3), package_v3_v3, empty_ancestors) == std::nullopt);
BOOST_CHECK(PackageV3Checks(pool, tx_v3_from_v3, GetVirtualTransactionSize(*tx_v3_from_v3), package_v3_v3, empty_ancestors) == std::nullopt);
// mempool_tx_v2
// ^
// tx_v2_from_v2
auto tx_v2_from_v2 = make_tx({COutPoint{mempool_tx_v2->GetHash(), 0}}, /*version=*/2);
auto ancestors_v2{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v2), m_limits)};
BOOST_CHECK(SingleV3Checks(tx_v2_from_v2, *ancestors_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2))
auto ancestors_v2{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v2_from_v2))};
BOOST_CHECK(SingleV3Checks(pool, tx_v2_from_v2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v2_from_v2))
== std::nullopt);
Package package_v2_v2{mempool_tx_v2, tx_v2_from_v2};
BOOST_CHECK(PackageV3Checks(tx_v2_from_v2, GetVirtualTransactionSize(*tx_v2_from_v2), package_v2_v2, empty_ancestors) == std::nullopt);
BOOST_CHECK(PackageV3Checks(pool, tx_v2_from_v2, GetVirtualTransactionSize(*tx_v2_from_v2), package_v2_v2, empty_ancestors) == std::nullopt);
}
// Tx spending v3 cannot have too many mempool ancestors
@ -217,15 +215,15 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
}
auto tx_v3_multi_parent = make_tx(mempool_outpoints, /*version=*/3);
package_multi_parents.emplace_back(tx_v3_multi_parent);
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_multi_parent), m_limits)};
BOOST_CHECK_EQUAL(ancestors->size(), 3);
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_multi_parent))};
BOOST_CHECK_EQUAL(ancestors.size(), 3);
const auto expected_error_str{strprintf("tx %s (wtxid=%s) would have too many ancestors",
tx_v3_multi_parent->GetHash().ToString(), tx_v3_multi_parent->GetWitnessHash().ToString())};
auto result{SingleV3Checks(tx_v3_multi_parent, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_parent))};
auto result{SingleV3Checks(pool, tx_v3_multi_parent, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_parent))};
BOOST_CHECK_EQUAL(result->first, expected_error_str);
BOOST_CHECK_EQUAL(result->second, nullptr);
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_multi_parent, GetVirtualTransactionSize(*tx_v3_multi_parent), package_multi_parents, empty_ancestors),
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v3_multi_parent, GetVirtualTransactionSize(*tx_v3_multi_parent), package_multi_parents, empty_ancestors),
expected_error_str);
}
@ -243,16 +241,16 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
}
auto tx_v3_multi_gen = make_tx({last_outpoint}, /*version=*/3);
package_multi_gen.emplace_back(tx_v3_multi_gen);
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_multi_gen), m_limits)};
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_multi_gen))};
const auto expected_error_str{strprintf("tx %s (wtxid=%s) would have too many ancestors",
tx_v3_multi_gen->GetHash().ToString(), tx_v3_multi_gen->GetWitnessHash().ToString())};
auto result{SingleV3Checks(tx_v3_multi_gen, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_gen))};
auto result{SingleV3Checks(pool, tx_v3_multi_gen, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_multi_gen))};
BOOST_CHECK_EQUAL(result->first, expected_error_str);
BOOST_CHECK_EQUAL(result->second, nullptr);
// Middle tx is what triggers a failure for the grandchild:
BOOST_CHECK_EQUAL(*PackageV3Checks(middle_tx, GetVirtualTransactionSize(*middle_tx), package_multi_gen, empty_ancestors), expected_error_str);
BOOST_CHECK(PackageV3Checks(tx_v3_multi_gen, GetVirtualTransactionSize(*tx_v3_multi_gen), package_multi_gen, empty_ancestors) == std::nullopt);
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, middle_tx, GetVirtualTransactionSize(*middle_tx), package_multi_gen, empty_ancestors), expected_error_str);
BOOST_CHECK(PackageV3Checks(pool, tx_v3_multi_gen, GetVirtualTransactionSize(*tx_v3_multi_gen), package_multi_gen, empty_ancestors) == std::nullopt);
}
// Tx spending v3 cannot be too large in virtual size.
@ -261,15 +259,15 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
{
auto tx_v3_child_big = make_tx(many_inputs, /*version=*/3);
const auto vsize{GetVirtualTransactionSize(*tx_v3_child_big)};
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child_big), m_limits)};
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child_big))};
const auto expected_error_str{strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes",
tx_v3_child_big->GetHash().ToString(), tx_v3_child_big->GetWitnessHash().ToString(), vsize, V3_CHILD_MAX_VSIZE)};
auto result{SingleV3Checks(tx_v3_child_big, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child_big))};
auto result{SingleV3Checks(pool, tx_v3_child_big, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child_big))};
BOOST_CHECK_EQUAL(result->first, expected_error_str);
BOOST_CHECK_EQUAL(result->second, nullptr);
Package package_child_big{mempool_tx_v3, tx_v3_child_big};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_child_big, GetVirtualTransactionSize(*tx_v3_child_big), package_child_big, empty_ancestors),
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v3_child_big, GetVirtualTransactionSize(*tx_v3_child_big), package_child_big, empty_ancestors),
expected_error_str);
}
@ -296,24 +294,24 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
mtx_many_sigops.vout.back().nValue = 10000;
auto tx_many_sigops{MakeTransactionRef(mtx_many_sigops)};
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_many_sigops), m_limits)};
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_many_sigops))};
// legacy uses fAccurate = false, and the maximum number of multisig keys is used
const int64_t total_sigops{static_cast<int64_t>(tx_many_sigops->vin.size()) * static_cast<int64_t>(script_multisig.GetSigOpCount(/*fAccurate=*/false))};
BOOST_CHECK_EQUAL(total_sigops, tx_many_sigops->vin.size() * MAX_PUBKEYS_PER_MULTISIG);
const int64_t bip141_vsize{GetVirtualTransactionSize(*tx_many_sigops)};
// Weight limit is not reached...
BOOST_CHECK(SingleV3Checks(tx_many_sigops, *ancestors, empty_conflicts_set, bip141_vsize) == std::nullopt);
BOOST_CHECK(SingleV3Checks(pool, tx_many_sigops, empty_conflicts_set, bip141_vsize) == std::nullopt);
// ...but sigop limit is.
const auto expected_error_str{strprintf("v3 child tx %s (wtxid=%s) is too big: %u > %u virtual bytes",
tx_many_sigops->GetHash().ToString(), tx_many_sigops->GetWitnessHash().ToString(),
total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, V3_CHILD_MAX_VSIZE)};
auto result{SingleV3Checks(tx_many_sigops, *ancestors, empty_conflicts_set,
auto result{SingleV3Checks(pool, tx_many_sigops, empty_conflicts_set,
GetVirtualTransactionSize(*tx_many_sigops, /*nSigOpCost=*/total_sigops, /*bytes_per_sigop=*/ DEFAULT_BYTES_PER_SIGOP))};
BOOST_CHECK_EQUAL(result->first, expected_error_str);
BOOST_CHECK_EQUAL(result->second, nullptr);
Package package_child_sigops{mempool_tx_v3, tx_many_sigops};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_many_sigops, total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, package_child_sigops, empty_ancestors),
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_many_sigops, total_sigops * DEFAULT_BYTES_PER_SIGOP / WITNESS_SCALE_FACTOR, package_child_sigops, empty_ancestors),
expected_error_str);
}
@ -321,43 +319,42 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
auto tx_mempool_v3_child = make_tx({COutPoint{mempool_tx_v3->GetHash(), 0}}, /*version=*/3);
{
BOOST_CHECK(GetTransactionWeight(*tx_mempool_v3_child) <= V3_CHILD_MAX_VSIZE * WITNESS_SCALE_FACTOR);
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_mempool_v3_child), m_limits)};
BOOST_CHECK(SingleV3Checks(tx_mempool_v3_child, *ancestors, empty_conflicts_set, GetVirtualTransactionSize(*tx_mempool_v3_child)) == std::nullopt);
auto ancestors{pool.CalculateMemPoolAncestors(entry.FromTx(tx_mempool_v3_child))};
BOOST_CHECK(SingleV3Checks(pool, tx_mempool_v3_child, empty_conflicts_set, GetVirtualTransactionSize(*tx_mempool_v3_child)) == std::nullopt);
pool.addUnchecked(entry.FromTx(tx_mempool_v3_child));
Package package_v3_1p1c{mempool_tx_v3, tx_mempool_v3_child};
BOOST_CHECK(PackageV3Checks(tx_mempool_v3_child, GetVirtualTransactionSize(*tx_mempool_v3_child), package_v3_1p1c, empty_ancestors) == std::nullopt);
BOOST_CHECK(PackageV3Checks(pool, tx_mempool_v3_child, GetVirtualTransactionSize(*tx_mempool_v3_child), package_v3_1p1c, empty_ancestors) == std::nullopt);
}
// A v3 transaction cannot have more than 1 descendant. Sibling is returned when exactly 1 exists.
{
auto tx_v3_child2 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 1}}, /*version=*/3);
// Configuration where parent already has 1 other child in mempool
auto ancestors_1sibling{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child2), m_limits)};
auto ancestors_1sibling{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child2))};
const auto expected_error_str{strprintf("tx %s (wtxid=%s) would exceed descendant count limit",
mempool_tx_v3->GetHash().ToString(), mempool_tx_v3->GetWitnessHash().ToString())};
auto result_with_sibling_eviction{SingleV3Checks(tx_v3_child2, *ancestors_1sibling, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child2))};
auto result_with_sibling_eviction{SingleV3Checks(pool, tx_v3_child2, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child2))};
BOOST_CHECK_EQUAL(result_with_sibling_eviction->first, expected_error_str);
// The other mempool child is returned to allow for sibling eviction.
BOOST_CHECK_EQUAL(result_with_sibling_eviction->second, tx_mempool_v3_child);
// If directly replacing the child, make sure there is no double-counting.
BOOST_CHECK(SingleV3Checks(tx_v3_child2, *ancestors_1sibling, {tx_mempool_v3_child->GetHash()}, GetVirtualTransactionSize(*tx_v3_child2))
BOOST_CHECK(SingleV3Checks(pool, tx_v3_child2, {tx_mempool_v3_child->GetHash()}, GetVirtualTransactionSize(*tx_v3_child2))
== std::nullopt);
Package package_v3_1p2c{mempool_tx_v3, tx_mempool_v3_child, tx_v3_child2};
BOOST_CHECK_EQUAL(*PackageV3Checks(tx_v3_child2, GetVirtualTransactionSize(*tx_v3_child2), package_v3_1p2c, empty_ancestors),
BOOST_CHECK_EQUAL(*PackageV3Checks(pool, tx_v3_child2, GetVirtualTransactionSize(*tx_v3_child2), package_v3_1p2c, empty_ancestors),
expected_error_str);
// Configuration where parent already has 2 other children in mempool (no sibling eviction allowed). This may happen as the result of a reorg.
pool.addUnchecked(entry.FromTx(tx_v3_child2));
auto tx_v3_child3 = make_tx({COutPoint{mempool_tx_v3->GetHash(), 24}}, /*version=*/3);
auto entry_mempool_parent = pool.GetIter(mempool_tx_v3->GetHash().ToUint256()).value();
BOOST_CHECK_EQUAL(entry_mempool_parent->GetCountWithDescendants(), 3);
auto ancestors_2siblings{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child3), m_limits)};
BOOST_CHECK_EQUAL(pool.GetNumChildren(entry_mempool_parent), 2);
auto ancestors_2siblings{pool.CalculateMemPoolAncestors(entry.FromTx(tx_v3_child3))};
auto result_2children{SingleV3Checks(tx_v3_child3, *ancestors_2siblings, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child3))};
auto result_2children{SingleV3Checks(pool, tx_v3_child3, empty_conflicts_set, GetVirtualTransactionSize(*tx_v3_child3))};
BOOST_CHECK_EQUAL(result_2children->first, expected_error_str);
// The other mempool child is not returned because sibling eviction is not allowed.
BOOST_CHECK_EQUAL(result_2children->second, nullptr);
@ -374,10 +371,10 @@ BOOST_FIXTURE_TEST_CASE(version3_tests, RegTestingSetup)
pool.addUnchecked(entry.FromTx(tx_mempool_sibling));
pool.addUnchecked(entry.FromTx(tx_mempool_nibling));
auto ancestors_3gen{pool.CalculateMemPoolAncestors(entry.FromTx(tx_to_submit), m_limits)};
auto ancestors_3gen{pool.CalculateMemPoolAncestors(entry.FromTx(tx_to_submit))};
const auto expected_error_str{strprintf("tx %s (wtxid=%s) would exceed descendant count limit",
tx_mempool_grandparent->GetHash().ToString(), tx_mempool_grandparent->GetWitnessHash().ToString())};
auto result_3gen{SingleV3Checks(tx_to_submit, *ancestors_3gen, empty_conflicts_set, GetVirtualTransactionSize(*tx_to_submit))};
auto result_3gen{SingleV3Checks(pool, tx_to_submit, empty_conflicts_set, GetVirtualTransactionSize(*tx_to_submit))};
BOOST_CHECK_EQUAL(result_3gen->first, expected_error_str);
// The other mempool child is not returned because sibling eviction is not allowed.
BOOST_CHECK_EQUAL(result_3gen->second, nullptr);

View File

@ -124,20 +124,25 @@ void CheckMempoolV3Invariants(const CTxMemPool& tx_pool)
LOCK(tx_pool.cs);
for (const auto& tx_info : tx_pool.infoAll()) {
const auto& entry = *Assert(tx_pool.GetEntry(tx_info.tx->GetHash()));
size_t desc_count, desc_size, anc_count, anc_size;
CAmount desc_fees, anc_fees;
tx_pool.CalculateDescendantData(entry, desc_count, desc_size, desc_fees);
tx_pool.CalculateAncestorData(entry, anc_count, anc_size, anc_fees);
if (tx_info.tx->nVersion == 3) {
// Check that special v3 ancestor/descendant limits and rules are always respected
Assert(entry.GetCountWithDescendants() <= V3_DESCENDANT_LIMIT);
Assert(entry.GetCountWithAncestors() <= V3_ANCESTOR_LIMIT);
Assert(desc_count <= V3_DESCENDANT_LIMIT);
Assert(anc_count <= V3_ANCESTOR_LIMIT);
// If this transaction has at least 1 ancestor, it's a "child" and has restricted weight.
if (entry.GetCountWithAncestors() > 1) {
if (anc_count > 1) {
Assert(entry.GetTxSize() <= V3_CHILD_MAX_VSIZE);
// All v3 transactions must only have v3 unconfirmed parents.
const auto& parents = entry.GetMemPoolParentsConst();
const auto& parents = tx_pool.GetParents(entry);
Assert(parents.begin()->get().GetSharedTx()->nVersion == 3);
}
} else if (entry.GetCountWithAncestors() > 1) {
} else if (anc_count > 1) {
// All non-v3 transactions must only have non-v3 unconfirmed parents.
for (const auto& parent : entry.GetMemPoolParentsConst()) {
for (const auto& parent : tx_pool.GetParents(entry)) {
Assert(parent.get().GetSharedTx()->nVersion != 3);
}
}

File diff suppressed because it is too large.

View File

@ -81,49 +81,6 @@ struct mempoolentry_wtxid
}
};
/** \class CompareTxMemPoolEntryByDescendantScore
*
* Sort an entry by max(score/size of entry's tx, score/size with all descendants).
*/
class CompareTxMemPoolEntryByDescendantScore
{
public:
bool operator()(const CTxMemPoolEntry& a, const CTxMemPoolEntry& b) const
{
double a_mod_fee, a_size, b_mod_fee, b_size;
GetModFeeAndSize(a, a_mod_fee, a_size);
GetModFeeAndSize(b, b_mod_fee, b_size);
// Avoid division by rewriting (a/b > c/d) as (a*d > c*b).
double f1 = a_mod_fee * b_size;
double f2 = a_size * b_mod_fee;
if (f1 == f2) {
return a.GetTime() >= b.GetTime();
}
return f1 < f2;
}
// Return the fee/size we're using for sorting this entry.
void GetModFeeAndSize(const CTxMemPoolEntry &a, double &mod_fee, double &size) const
{
// Compare feerate with descendants to feerate of the transaction, and
// return the fee/size for the max.
double f1 = (double)a.GetModifiedFee() * a.GetSizeWithDescendants();
double f2 = (double)a.GetModFeesWithDescendants() * a.GetTxSize();
if (f2 > f1) {
mod_fee = a.GetModFeesWithDescendants();
size = a.GetSizeWithDescendants();
} else {
mod_fee = a.GetModifiedFee();
size = a.GetTxSize();
}
}
};
/** \class CompareTxMemPoolEntryByScore
*
* Sort by feerate of entry (fee/size) in descending order
@ -154,54 +111,8 @@ public:
}
};
/** \class CompareTxMemPoolEntryByAncestorScore
*
* Sort an entry by min(score/size of entry's tx, score/size with all ancestors).
*/
class CompareTxMemPoolEntryByAncestorFee
{
public:
template<typename T>
bool operator()(const T& a, const T& b) const
{
double a_mod_fee, a_size, b_mod_fee, b_size;
GetModFeeAndSize(a, a_mod_fee, a_size);
GetModFeeAndSize(b, b_mod_fee, b_size);
// Avoid division by rewriting (a/b > c/d) as (a*d > c*b).
double f1 = a_mod_fee * b_size;
double f2 = a_size * b_mod_fee;
if (f1 == f2) {
return a.GetTx().GetHash() < b.GetTx().GetHash();
}
return f1 > f2;
}
// Return the fee/size we're using for sorting this entry.
template <typename T>
void GetModFeeAndSize(const T &a, double &mod_fee, double &size) const
{
// Compare feerate with ancestors to feerate of the transaction, and
// return the fee/size for the min.
double f1 = (double)a.GetModifiedFee() * a.GetSizeWithAncestors();
double f2 = (double)a.GetModFeesWithAncestors() * a.GetTxSize();
if (f1 > f2) {
mod_fee = a.GetModFeesWithAncestors();
size = a.GetSizeWithAncestors();
} else {
mod_fee = a.GetModifiedFee();
size = a.GetTxSize();
}
}
};
// Multi_index tag names
struct descendant_score {};
struct entry_time {};
struct ancestor_score {};
struct index_by_wtxid {};
/**
@ -339,23 +250,11 @@ public:
mempoolentry_wtxid,
SaltedTxidHasher
>,
// sorted by fee rate
boost::multi_index::ordered_non_unique<
boost::multi_index::tag<descendant_score>,
boost::multi_index::identity<CTxMemPoolEntry>,
CompareTxMemPoolEntryByDescendantScore
>,
// sorted by entry time
boost::multi_index::ordered_non_unique<
boost::multi_index::tag<entry_time>,
boost::multi_index::identity<CTxMemPoolEntry>,
CompareTxMemPoolEntryByEntryTime
>,
// sorted by fee rate with ancestors
boost::multi_index::ordered_non_unique<
boost::multi_index::tag<ancestor_score>,
boost::multi_index::identity<CTxMemPoolEntry>,
CompareTxMemPoolEntryByAncestorFee
>
>
> indexed_transaction_set;
@ -390,45 +289,42 @@ public:
mutable RecursiveMutex cs;
indexed_transaction_set mapTx GUARDED_BY(cs);
// Clusters
TxGraph txgraph GUARDED_BY(cs);
using txiter = indexed_transaction_set::nth_index<0>::type::const_iterator;
std::vector<CTransactionRef> txns_randomized GUARDED_BY(cs); //!< All transactions in mapTx, in random order
typedef std::set<txiter, CompareIteratorByHash> setEntries;
typedef std::vector<txiter> Entries;
using Limits = kernel::MemPoolLimits;
uint64_t CalculateDescendantMaximum(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
void CalculateAncestorData(const CTxMemPoolEntry& entry, size_t& ancestor_count, size_t& ancestor_size, CAmount& ancestor_fees) const EXCLUSIVE_LOCKS_REQUIRED(cs);
void CalculateDescendantData(const CTxMemPoolEntry& entry, size_t& descendant_count, size_t& descendant_size, CAmount& descendant_fees) const EXCLUSIVE_LOCKS_REQUIRED(cs);
int64_t GetNumChildren(txiter it) const { return it->GetTxEntryChildren().size(); }
int64_t GetNumChildren(const CTxMemPoolEntry &e) const { return e.GetTxEntryChildren().size(); }
std::vector<CTxMemPoolEntry::CTxMemPoolEntryRef> GetChildren(const CTxMemPoolEntry &entry) const;
std::vector<CTxMemPoolEntry::CTxMemPoolEntryRef> GetParents(const CTxMemPoolEntry &entry) const;
Entries CalculateParentsOf(const CTransaction& tx) const;
private:
typedef std::map<txiter, setEntries, CompareIteratorByHash> cacheMap;
void UpdateParent(txiter entry, txiter parent, bool add) EXCLUSIVE_LOCKS_REQUIRED(cs);
void UpdateChild(txiter entry, txiter child, bool add) EXCLUSIVE_LOCKS_REQUIRED(cs);
std::vector<indexed_transaction_set::const_iterator> GetSortedDepthAndScore() const EXCLUSIVE_LOCKS_REQUIRED(cs);
std::vector<indexed_transaction_set::const_iterator> GetSortedScoreWithTopology() const EXCLUSIVE_LOCKS_REQUIRED(cs);
/**
* Track locally submitted transactions to periodically retry initial broadcast.
*/
std::set<uint256> m_unbroadcast_txids GUARDED_BY(cs);
std::vector<TxEntry::TxEntryRef> CalculateParents(const CTransaction& tx) const EXCLUSIVE_LOCKS_REQUIRED(cs);
std::vector<TxEntry::TxEntryRef> CalculateParents(const CTxMemPoolEntry &entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
/**
* Helper function to calculate all in-mempool ancestors of staged_ancestors and apply ancestor
* and descendant limits (including staged_ancestors themselves, entry_size and entry_count).
*
* @param[in] entry_size Virtual size to include in the limits.
* @param[in] entry_count How many entries to include in the limits.
* @param[in] staged_ancestors Should contain entries in the mempool.
* @param[in] limits Maximum number and size of ancestors and descendants
*
* @return all in-mempool ancestors, or an error if any ancestor or descendant limits were hit
*/
util::Result<setEntries> CalculateAncestorsAndCheckLimits(int64_t entry_size,
size_t entry_count,
CTxMemPoolEntry::Parents &staged_ancestors,
const Limits& limits
) const EXCLUSIVE_LOCKS_REQUIRED(cs);
// Helper to remove all transactions that conflict with a given
// transaction (used for transactions appearing in a block).
void removeConflicts(const CTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(cs);
public:
indirectmap<COutPoint, const CTransaction*> mapNextTx GUARDED_BY(cs);
@ -464,17 +360,12 @@ public:
* all inputs are in the mapNextTx array). If sanity-checking is turned off,
* check does nothing.
*/
void check(const CCoinsViewCache& active_coins_tip, int64_t spendheight) const EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
void check(const CCoinsViewCache& active_coins_tip, int64_t spendheight, Limits *limits=nullptr) const EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
// addUnchecked must update state for all ancestors of a given transaction,
// to track size/count of descendant transactions. First version of
// addUnchecked can be used to have it call CalculateMemPoolAncestors(), and
// then invoke the second version.
// Note that addUnchecked is ONLY called from ATMP outside of tests
// and any other callers may break wallet's in-mempool tracking (due to
// lack of CValidationInterface::TransactionAddedToMempool callbacks).
void addUnchecked(const CTxMemPoolEntry& entry) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
void addUnchecked(const CTxMemPoolEntry& entry, setEntries& setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
void removeRecursive(const CTransaction& tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** After reorg, filter the entries that would no longer be valid in the next block, and update
@ -485,10 +376,9 @@ public:
* and updates an entry's LockPoints.
* */
void removeForReorg(CChain& chain, std::function<bool(txiter)> filter_final_and_mature) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
void removeConflicts(const CTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(cs);
void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight) EXCLUSIVE_LOCKS_REQUIRED(cs);
bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb, bool wtxid=false);
bool CompareMiningScoreWithTopology(const uint256& hasha, const uint256& hashb, bool wtxid=false);
bool isSpent(const COutPoint& outpoint) const;
unsigned int GetTransactionsUpdated() const;
void AddTransactionsUpdated(unsigned int n);
@ -536,10 +426,8 @@ public:
* If a transaction is in this set, then all in-mempool descendants must
* also be in the set, unless this transaction is being removed for being
* in a block.
* Set updateDescendants to true when removing a tx that was in a block, so
* that any in-mempool descendants have their ancestor state updated.
*/
void RemoveStaged(setEntries& stage, bool updateDescendants, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
void RemoveStaged(Entries& stage, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** UpdateTransactionsFromBlock is called when adding transactions from a
* disconnected block back to the mempool, new mempool entries may have
@ -556,41 +444,54 @@ public:
*/
void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);
/** Get the children of a tx using mapNextTx (needed for reorgs) */
std::vector<TxEntry::TxEntryRef> GetChildrenOf(const TxEntry& tx);
/**
* Try to calculate all in-mempool ancestors of entry.
* Calculate whether cluster size limits would be exceeded if a new tx were
* added to the mempool (assuming no conflicts).
*
* @param[in] entry_size vbytes of the new transaction(s)
* @param[in] entry_count number of new transactions to be added
* @param[in] limits Contains maximum cluster size/count
* @param[in] all_parents All parents of entry/entries in the mempool
*
* @return true if cluster limits are respected, or an error if limits were
* exceeded
*/
util::Result<bool> CheckClusterSizeLimit(int64_t entry_size, size_t entry_count,
const Limits& limits, Entries& all_parents) const
EXCLUSIVE_LOCKS_REQUIRED(cs);
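// Illustrative sketch of a caller (not part of this header; `pool`, `tx`, and
// `entry_vsize` are hypothetical names, and error handling is elided). A
// single-transaction acceptance path might consult the cluster limit like so:
//
//   LOCK(pool.cs);
//   CTxMemPool::Entries all_parents{pool.CalculateParentsOf(tx)};
//   const auto cluster_ok{pool.CheckClusterSizeLimit(entry_vsize, /*entry_count=*/1,
//                                                    pool.m_limits, all_parents)};
//   if (!cluster_ok) {
//       // Reject: adding tx would exceed the configured cluster vsize/count limits.
//   }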
std::vector<FeeFrac> GetFeerateDiagram() const EXCLUSIVE_LOCKS_REQUIRED(cs);
private:
util::Result<bool> CheckClusterSizeAgainstLimits(const std::vector<TxEntry::TxEntryRef>& parents,
int64_t count, int64_t vbytes, GraphLimits limits) const
EXCLUSIVE_LOCKS_REQUIRED(cs);
public:
/**
* Calculate all in-mempool ancestors of entry.
* (these are all calculated including the tx itself)
*
* @param[in] entry CTxMemPoolEntry of which all in-mempool ancestors are calculated
* @param[in] limits Maximum number and size of ancestors and descendants
* @param[in] fSearchForParents Whether to search a tx's vin for in-mempool parents, or look
* up parents from mapLinks. Must be true for entries not in
* the mempool
*
* @return all in-mempool ancestors, or an error if any ancestor or descendant limits were hit
* @return all in-mempool ancestors
*/
util::Result<setEntries> CalculateMemPoolAncestors(const CTxMemPoolEntry& entry,
const Limits& limits,
setEntries CalculateMemPoolAncestors(const CTxMemPoolEntry& entry,
bool fSearchForParents = true) const EXCLUSIVE_LOCKS_REQUIRED(cs);
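// Illustrative example (hypothetical caller, not part of this header). With the
// limits-free signature above, ancestor lookup no longer returns an error:
//
//   LOCK(pool.cs);
//   // For an entry not yet in the mempool, search its inputs for in-mempool parents:
//   CTxMemPool::setEntries ancestors{pool.CalculateMemPoolAncestors(entry, /*fSearchForParents=*/true)};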
/**
* Same as CalculateMemPoolAncestors, but always returns a (non-optional) setEntries.
* Should only be used when it is assumed CalculateMemPoolAncestors would not fail. If
* CalculateMemPoolAncestors does unexpectedly fail, an empty setEntries is returned and the
* error is logged to BCLog::MEMPOOL with level BCLog::Level::Error. In debug builds, failure
* of CalculateMemPoolAncestors will lead to shutdown due to assertion failure.
*
* @param[in] calling_fn_name Name of calling function so we can properly log the call site
*
* @return a setEntries corresponding to the result of CalculateMemPoolAncestors or an empty
* setEntries if it failed
*
* @see CTXMemPool::CalculateMemPoolAncestors()
*/
setEntries AssumeCalculateMemPoolAncestors(
std::string_view calling_fn_name,
const CTxMemPoolEntry &entry,
const Limits& limits,
bool fSearchForParents = true) const EXCLUSIVE_LOCKS_REQUIRED(cs);
std::vector<CTxMemPoolEntry::CTxMemPoolEntryRef> CalculateMemPoolAncestorsFast(const CTxMemPoolEntry &entry, bool fSearchForParents) const;
private:
std::vector<TxEntry::TxEntryRef> CalculateAncestors(const CTxMemPoolEntry& entry, bool fSearchForParents) const;
public:
bool HasDescendants(const Txid& txid) const;
/** Collect the entire cluster of connected transactions for each transaction in txids.
* All txids must correspond to transaction entries in the mempool, otherwise this returns an
@ -613,10 +514,7 @@ public:
util::Result<void> CheckPackageLimits(const Package& package,
int64_t total_vsize) const EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Populate setDescendants with all in-mempool descendants of hash.
* Assumes that setDescendants includes all in-mempool descendants of anything
* already in it. */
void CalculateDescendants(txiter it, setEntries& setDescendants) const EXCLUSIVE_LOCKS_REQUIRED(cs);
Entries CalculateDescendants(Entries txs) const EXCLUSIVE_LOCKS_REQUIRED(cs);
/** The minimum fee to get into the mempool, which may itself not be enough
* for larger-sized transactions.
@ -638,12 +536,12 @@ public:
int Expire(std::chrono::seconds time) EXCLUSIVE_LOCKS_REQUIRED(cs);
/**
* Calculate the ancestor and descendant count for the given transaction.
* Calculate the ancestor and cluster count for the given transaction.
* The counts include the transaction itself.
* When ancestors is non-zero (ie, the transaction itself is in the mempool),
* ancestorsize and ancestorfees will also be set to the appropriate values.
*/
void GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants, size_t* ancestorsize = nullptr, CAmount* ancestorfees = nullptr) const;
void GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& clustersize, size_t* ancestorsize = nullptr, CAmount* ancestorfees = nullptr) const;
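// Illustrative call (hypothetical names, not part of this header):
//
//   size_t ancestors{0}, cluster_size{0};
//   pool.GetTransactionAncestry(txid, ancestors, cluster_size);
//   // ancestors == 0 means txid is not in the mempool; otherwise both counts
//   // include the transaction itself.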
/**
* @returns true if an initial attempt to load the persisted mempool was made, regardless of
@ -745,14 +643,13 @@ public:
* chunk, and represent their complete cluster. In other words, they have no
* in-mempool ancestors.
*
* @param[in] replacement_fees Package fees
* @param[in] replacement_vsize Package size (must be greater than 0)
* @param[in] new_entries The new transactions with their fees
* @param[in] direct_conflicts All transactions that would be removed directly by
* having a conflicting input with a proposed transaction
* @param[in] all_conflicts All transactions that would be removed
* @return old and new diagram pair respectively, or an error string if the conflicts don't match a calculable topology
*/
util::Result<std::pair<std::vector<FeeFrac>, std::vector<FeeFrac>>> CalculateChunksForRBF(CAmount replacement_fees, int64_t replacement_vsize, const setEntries& direct_conflicts, const setEntries& all_conflicts) EXCLUSIVE_LOCKS_REQUIRED(cs);
util::Result<std::pair<std::vector<FeeFrac>, std::vector<FeeFrac>>> CalculateChunksForRBF(std::vector<std::pair<CTxMemPoolEntry*, CAmount>> new_entries, const setEntries& direct_conflicts, const setEntries& all_conflicts) EXCLUSIVE_LOCKS_REQUIRED(cs);
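// Illustrative sketch of how the result might be consumed (hypothetical caller;
// names are not from this patch):
//
//   auto chunks{pool.CalculateChunksForRBF(new_entries, direct_conflicts, all_conflicts)};
//   if (!chunks) { /* conflicts don't form a calculable topology */ }
//   const auto& [old_diagram, new_diagram] = *chunks;
//   // old_diagram/new_diagram are the sorted FeeFrac chunks describing the
//   // mempool's feerate diagram before and after the proposed replacement.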
/* Check that all direct conflicts are in a cluster size of two or less. Each
* direct conflict may be in a separate cluster.
@ -760,47 +657,6 @@ public:
std::optional<std::string> CheckConflictTopology(const setEntries& direct_conflicts);
private:
/** UpdateForDescendants is used by UpdateTransactionsFromBlock to update
* the descendants for a single transaction that has been added to the
* mempool but may have child transactions in the mempool, eg during a
* chain reorg.
*
* @pre CTxMemPoolEntry::m_children is correct for the given tx and all
* descendants.
* @pre cachedDescendants is an accurate cache where each entry has all
* descendants of the corresponding key, including those that should
* be removed for violation of ancestor limits.
* @post if updateIt has any non-excluded descendants, cachedDescendants has
* a new cache line for updateIt.
* @post descendants_to_remove has a new entry for any descendant which exceeded
* ancestor limits relative to updateIt.
*
* @param[in] updateIt the entry to update for its descendants
* @param[in,out] cachedDescendants a cache where each line corresponds to all
* descendants. It will be updated with the descendants of the transaction
* being updated, so that future invocations don't need to walk the same
* transaction again, if encountered in another transaction chain.
* @param[in] setExclude the set of descendant transactions in the mempool
* that must not be accounted for (because any descendants in setExclude
* were added to the mempool after the transaction being updated and hence
* their state is already reflected in the parent state).
* @param[out] descendants_to_remove Populated with the txids of entries that
* exceed ancestor limits. It's the responsibility of the caller to
* removeRecursive them.
*/
void UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Update ancestors of hash to add/remove it as a descendant transaction. */
void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Set ancestor state for an entry */
void UpdateEntryForAncestors(txiter it, const setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** For each transaction being removed, update ancestors and any direct children.
* If updateDescendants is true, then also update in-mempool descendants'
* ancestor state. */
void UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Sever link between specified transaction and direct children. */
void UpdateChildrenForRemoval(txiter entry) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Before calling removeUnchecked for a given transaction,
* UpdateForRemoveFromMempool must be called on the entire (dependent) set
* of transactions being removed at the same time. We use each
@ -821,7 +677,7 @@ public:
*/
bool visited(const txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs, m_epoch)
{
return m_epoch.visited(it->m_epoch_marker);
return m_epoch.visited(it->mempool_epoch_marker);
}
bool visited(std::optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs, m_epoch)

src/util/bitset.h (new file, 420 lines)
View File

@ -0,0 +1,420 @@
// Copyright (c) The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_UTIL_BITSET_H
#define BITCOIN_UTIL_BITSET_H
#include <array>
#include <bit>
#include <cstdint>
#include <limits>
#include <type_traits>
#ifdef _MSC_VER
# include <intrin.h>
#endif
/* This file provides data types similar to std::bitset, but adds the following functionality:
*
* - Efficient iteration over all set bits (compatible with range-based for loops).
* - Efficient search for the first and last set bit (First() and Last()).
* - Efficient set subtraction: (a / b) implements "a and not b".
* - Efficient subset/superset testing: (a >> b) and (a << b).
* - Efficient set overlap testing: a && b
* - Efficient construction of set containing 0..N-1 (S::Fill).
*
* Other differences:
* - BitSet<N> is a bitset that supports at least N elements, but may support more (Size() reports
* the actual number). Because the actual number is unpredictable, there are no operations that
* affect all positions (like std::bitset's operator~, flip(), or all()).
* - Various other unimplemented features.
*/
namespace bitset_detail {
/** Count the number of bits set in an unsigned integer type. */
template<typename I>
unsigned inline PopCount(I v)
{
static_assert(std::is_integral_v<I> && std::is_unsigned_v<I> && std::numeric_limits<I>::radix == 2);
constexpr auto BITS = std::numeric_limits<I>::digits;
// Algorithms from https://en.wikipedia.org/wiki/Hamming_weight#Efficient_implementation.
// These seem to be faster than __builtin_popcount when compiling for non-SSE4 on x86_64.
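// Worked example (illustrative): for v = 0xF0 on the 32-bit path below,
//   v -= (v >> 1) & 0x55555555            -> 0xA0 (each 2-bit pair holds its popcount)
//   v = (v & 0x33..) + ((v >> 2) & 0x33..)-> 0x40 (each nibble holds its popcount)
//   v = (v + (v >> 4)) & 0x0f0f0f0f       -> 0x04 (each byte holds its popcount)
//   v += v >> 8; v += v >> 16             -> 0x04
//   return v & 0x3f                       -> 4, the number of set bits in 0xF0.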
if constexpr (BITS <= 32) {
v -= (v >> 1) & 0x55555555;
v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
v = (v + (v >> 4)) & 0x0f0f0f0f;
v += v >> 8;
v += v >> 16;
return v & 0x3f;
} else {
static_assert(BITS <= 64);
v -= (v >> 1) & 0x5555555555555555;
v = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333);
v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f;
return (v * uint64_t{0x0101010101010101}) >> 56;
}
}
/** A bitset implementation backed by a single integer of type I. */
template<typename I>
class IntBitSet
{
// Only binary, unsigned, integer, types allowed.
static_assert(std::is_integral_v<I> && std::is_unsigned_v<I> && std::numeric_limits<I>::radix == 2);
/** The maximum number of bits this bitset supports. */
static constexpr unsigned MAX_SIZE = std::numeric_limits<I>::digits;
/** Integer whose bits represent this bitset. */
I m_val;
/** Internal constructor with a given integer as contents. */
IntBitSet(I val) noexcept : m_val{val} {}
/** Dummy type to return using end(). Only used for comparing with Iterator. */
class IteratorEnd
{
friend class IntBitSet;
IteratorEnd() = default;
public:
IteratorEnd(const IteratorEnd&) = default;
};
/** Iterator type returned by begin(), which efficiently iterates all 1 positions. */
class Iterator
{
friend class IntBitSet;
I m_val; /**< The original integer's remaining bits. */
unsigned m_pos; /**< Last reported 1 position (if m_val != 0). */
Iterator(I val) noexcept : m_val(val), m_pos(0)
{
if (m_val != 0) m_pos = std::countr_zero(m_val);
}
public:
/** Do not allow external code to construct an Iterator. */
Iterator() = delete;
// Copying is allowed.
Iterator(const Iterator&) noexcept = default;
Iterator& operator=(const Iterator&) noexcept = default;
/** Test whether we are not yet done (can only compare with IteratorEnd). */
friend bool operator!=(const Iterator& a, const IteratorEnd&) noexcept
{
return a.m_val != 0;
}
/** Test whether we are done (can only compare with IteratorEnd). */
friend bool operator==(const Iterator& a, const IteratorEnd&) noexcept
{
return a.m_val == 0;
}
/** Progress to the next 1 bit (only if != IteratorEnd). */
Iterator& operator++() noexcept
{
m_val &= m_val - I{1U};
if (m_val != 0) m_pos = std::countr_zero(m_val);
return *this;
}
/** Get the current bit position (only if != IteratorEnd). */
const unsigned& operator*() const noexcept { return m_pos; }
};
public:
/** Construct an all-zero bitset. */
IntBitSet() noexcept : m_val{0} {}
/** Copy construct a bitset. */
IntBitSet(const IntBitSet&) noexcept = default;
/** Copy assign a bitset. */
IntBitSet& operator=(const IntBitSet&) noexcept = default;
/** Construct a bitset with bits 0..count-1 (inclusive) set to 1. */
static IntBitSet Fill(unsigned count) noexcept {
IntBitSet ret;
if (count) ret.m_val = I(~I{0}) >> (MAX_SIZE - count);
return ret;
}
/** Compute the number of 1 bits in the bitset. */
unsigned Count() const noexcept { return PopCount(m_val); }
/** Return the number of bits that this object holds. */
static constexpr unsigned Size() noexcept { return MAX_SIZE; }
/** Set a bit to 1. */
void Set(unsigned pos) noexcept { m_val |= I{1U} << pos; }
/** Set a bit to the specified value. */
void Set(unsigned pos, bool val) noexcept { m_val = (m_val & ~I(I{1U} << pos)) | (I(val) << pos); }
/** Set a bit to 0. */
void Reset(unsigned pos) noexcept { m_val &= ~I(I{1U} << pos); }
/** Retrieve a bit at the given position. */
bool operator[](unsigned pos) const noexcept { return (m_val >> pos) & 1U; }
/** Check if all bits are 0. */
bool None() const noexcept { return m_val == 0; }
/** Check if any bits are 1. */
bool Any() const noexcept { return m_val != 0; }
/** Return an object that iterates over all 1 bits (++ and * only allowed when != end()). */
Iterator begin() const noexcept { return Iterator(m_val); }
/** Return a dummy object to compare Iterators with. */
IteratorEnd end() const noexcept { return IteratorEnd(); }
/** Find the first element (requires Any()). */
unsigned First() const noexcept { return std::countr_zero(m_val); }
/** Find the last element (requires Any()). */
unsigned Last() const noexcept { return std::bit_width(m_val) - 1; }
/** Set this object's bits to be the binary OR between respective bits from this and a. */
IntBitSet& operator|=(const IntBitSet& a) noexcept { m_val |= a.m_val; return *this; }
/** Set this object's bits to be the binary AND between respective bits from this and a. */
IntBitSet& operator&=(const IntBitSet& a) noexcept { m_val &= a.m_val; return *this; }
/** Set this object's bits to be the binary AND NOT between respective bits from this and a. */
IntBitSet& operator/=(const IntBitSet& a) noexcept { m_val &= ~a.m_val; return *this; }
/** Check if the intersection between two sets is non-empty. */
friend bool operator&&(const IntBitSet& a, const IntBitSet& b) noexcept { return a.m_val & b.m_val; }
/** Return an object with the binary AND between respective bits from a and b. */
friend IntBitSet operator&(const IntBitSet& a, const IntBitSet& b) noexcept { return {I(a.m_val & b.m_val)}; }
/** Return an object with the binary OR between respective bits from a and b. */
friend IntBitSet operator|(const IntBitSet& a, const IntBitSet& b) noexcept { return {I(a.m_val | b.m_val)}; }
/** Return an object with the binary AND NOT between respective bits from a and b. */
friend IntBitSet operator/(const IntBitSet& a, const IntBitSet& b) noexcept { return {I(a.m_val & ~b.m_val)}; }
/** Check if bitset a and bitset b are identical. */
friend bool operator==(const IntBitSet& a, const IntBitSet& b) noexcept { return a.m_val == b.m_val; }
/** Check if bitset a and bitset b are different. */
friend bool operator!=(const IntBitSet& a, const IntBitSet& b) noexcept { return a.m_val != b.m_val; }
/** Check if bitset a is a superset of bitset b (= every 1 bit in b is also in a). */
friend bool operator>>(const IntBitSet& a, const IntBitSet& b) noexcept { return (b.m_val & ~a.m_val) == 0; }
/** Check if bitset a is a subset of bitset b (= every 1 bit in a is also in b). */
friend bool operator<<(const IntBitSet& a, const IntBitSet& b) noexcept { return (a.m_val & ~b.m_val) == 0; }
/** Swap two bitsets. */
friend void swap(IntBitSet& a, IntBitSet& b) noexcept { std::swap(a.m_val, b.m_val); }
};
/** A bitset implementation backed by N integers of type I. */
template<typename I, unsigned N>
class MultiIntBitSet
{
// Only binary, unsigned, integer, types allowed.
static_assert(std::is_integral_v<I> && std::is_unsigned_v<I> && std::numeric_limits<I>::radix == 2);
/** The number of bits per integer. */
static constexpr unsigned LIMB_BITS = std::numeric_limits<I>::digits;
/** Number of elements this set type supports. */
static constexpr unsigned MAX_SIZE = LIMB_BITS * N;
/** Array whose member integers store the bits of the set. */
std::array<I, N> m_val{};
/** Dummy type to return using end(). Only used for comparing with Iterator. */
class IteratorEnd
{
friend class MultiIntBitSet;
IteratorEnd() = default;
public:
IteratorEnd(const IteratorEnd&) = default;
};
/** Iterator type returned by begin(), which efficiently iterates all 1 positions. */
class Iterator
{
friend class MultiIntBitSet;
const std::array<I, N>* m_ptr; /**< Pointer to array to fetch bits from. */
I m_val; /**< The remaining bits of (*m_ptr)[m_idx]. */
unsigned m_pos; /**< The last reported position. */
unsigned m_idx; /**< The index in *m_ptr currently being iterated over. */
Iterator(const std::array<I, N>* ptr) noexcept : m_ptr(ptr), m_idx(0)
{
do {
m_val = (*m_ptr)[m_idx];
if (m_val) {
m_pos = std::countr_zero(m_val) + m_idx * LIMB_BITS;
break;
}
++m_idx;
} while(m_idx < N);
}
public:
/** Do not allow external code to construct an Iterator. */
Iterator() = delete;
// Copying is allowed.
Iterator(const Iterator&) noexcept = default;
Iterator& operator=(const Iterator&) noexcept = default;
/** Test whether we are not yet done (can only compare with IteratorEnd). */
friend bool operator!=(const Iterator& a, const IteratorEnd&) noexcept
{
return a.m_idx != N;
}
/** Test whether we are done (can only compare with IteratorEnd). */
friend bool operator==(const Iterator& a, const IteratorEnd&) noexcept
{
return a.m_idx == N;
}
/** Progress to the next 1 bit (only if != IteratorEnd). */
Iterator& operator++() noexcept
{
m_val &= m_val - I{1U};
if (m_val == 0) {
while (true) {
++m_idx;
if (m_idx == N) break;
m_val = (*m_ptr)[m_idx];
if (m_val) {
m_pos = std::countr_zero(m_val) + m_idx * LIMB_BITS;
break;
}
}
} else {
m_pos = std::countr_zero(m_val) + m_idx * LIMB_BITS;
}
return *this;
}
/** Get the current bit position (only if != IteratorEnd). */
const unsigned& operator*() const noexcept { return m_pos; }
};
public:
/** Construct an all-zero bitset. */
MultiIntBitSet() noexcept {}
/** Copy construct a bitset. */
MultiIntBitSet(const MultiIntBitSet&) noexcept = default;
/** Copy assign a bitset. */
MultiIntBitSet& operator=(const MultiIntBitSet&) noexcept = default;
/** Set a bit to 1. */
void Set(unsigned pos) noexcept { m_val[pos / LIMB_BITS] |= I{1U} << (pos % LIMB_BITS); }
/** Set a bit to the specified value. */
void Set(unsigned pos, bool val) noexcept { m_val[pos / LIMB_BITS] = (m_val[pos / LIMB_BITS] & ~I(I{1U} << (pos % LIMB_BITS))) | (I{val} << (pos % LIMB_BITS)); }
/** Set a bit to 0. */
void Reset(unsigned pos) noexcept { m_val[pos / LIMB_BITS] &= ~I(I{1U} << (pos % LIMB_BITS)); }
/** Retrieve a bit at the given position. */
bool operator[](unsigned pos) const noexcept { return (m_val[pos / LIMB_BITS] >> (pos % LIMB_BITS)) & 1U; }
/** Construct a bitset with bits 0..count-1 (inclusive) set to 1. */
static MultiIntBitSet Fill(unsigned count) noexcept {
MultiIntBitSet ret;
if (count) {
unsigned i = 0;
while (count > LIMB_BITS) {
ret.m_val[i++] = ~I{0};
count -= LIMB_BITS;
}
ret.m_val[i] = I(~I{0}) >> (LIMB_BITS - count);
}
return ret;
}
/** Return the number of bits that this object holds. */
static constexpr unsigned Size() noexcept { return MAX_SIZE; }
/** Compute the number of 1 bits in the bitset. */
unsigned Count() const noexcept
{
unsigned ret{0};
for (I v : m_val) ret += PopCount(v);
return ret;
}
/** Check if all bits are 0. */
bool None() const noexcept
{
for (auto v : m_val) {
if (v != 0) return false;
}
return true;
}
/** Check if any bits are 1. */
bool Any() const noexcept
{
for (auto v : m_val) {
if (v != 0) return true;
}
return false;
}
/** Return an object that iterates over all 1 bits (++ and * only allowed when != end()). */
Iterator begin() const noexcept { return Iterator(&m_val); }
/** Return a dummy object to compare Iterators with. */
IteratorEnd end() const noexcept { return IteratorEnd(); }
/** Find the first element (requires Any()). */
unsigned First() const noexcept
{
unsigned p = 0;
while (m_val[p] == 0) ++p;
return std::countr_zero(m_val[p]) + p * LIMB_BITS;
}
/** Find the last element (requires Any()). */
unsigned Last() const noexcept
{
unsigned p = N - 1;
while (m_val[p] == 0) --p;
return std::bit_width(m_val[p]) - 1 + p * LIMB_BITS;
}
/** Set this object's bits to be the binary OR between respective bits from this and a. */
MultiIntBitSet& operator|=(const MultiIntBitSet& a) noexcept
{
for (unsigned i = 0; i < N; ++i) {
m_val[i] |= a.m_val[i];
}
return *this;
}
/** Set this object's bits to be the binary AND between respective bits from this and a. */
MultiIntBitSet& operator&=(const MultiIntBitSet& a) noexcept
{
for (unsigned i = 0; i < N; ++i) {
m_val[i] &= a.m_val[i];
}
return *this;
}
/** Set this object's bits to be the binary AND NOT between respective bits from this and a. */
MultiIntBitSet& operator/=(const MultiIntBitSet& a) noexcept
{
for (unsigned i = 0; i < N; ++i) {
m_val[i] &= ~a.m_val[i];
}
return *this;
}
/** Check whether the intersection between two sets is non-empty. */
friend bool operator&&(const MultiIntBitSet& a, const MultiIntBitSet& b) noexcept
{
for (unsigned i = 0; i < N; ++i) {
if (a.m_val[i] & b.m_val[i]) return true;
}
return false;
}
/** Return an object with the binary AND between respective bits from a and b. */
friend MultiIntBitSet operator&(const MultiIntBitSet& a, const MultiIntBitSet& b) noexcept
{
MultiIntBitSet r;
for (unsigned i = 0; i < N; ++i) {
r.m_val[i] = a.m_val[i] & b.m_val[i];
}
return r;
}
/** Return an object with the binary OR between respective bits from a and b. */
friend MultiIntBitSet operator|(const MultiIntBitSet& a, const MultiIntBitSet& b) noexcept
{
MultiIntBitSet r;
for (unsigned i = 0; i < N; ++i) {
r.m_val[i] = a.m_val[i] | b.m_val[i];
}
return r;
}
/** Return an object with the binary AND NOT between respective bits from a and b. */
friend MultiIntBitSet operator/(const MultiIntBitSet& a, const MultiIntBitSet& b) noexcept
{
MultiIntBitSet r;
for (unsigned i = 0; i < N; ++i) {
r.m_val[i] = a.m_val[i] & ~b.m_val[i];
}
return r;
}
/** Check if bitset a is a superset of bitset b (= every 1 bit in b is also in a). */
friend bool operator>>(const MultiIntBitSet& a, const MultiIntBitSet& b) noexcept
{
for (unsigned i = 0; i < N; ++i) {
if (b.m_val[i] & ~a.m_val[i]) return false;
}
return true;
}
/** Check if bitset a is a subset of bitset b (= every 1 bit in a is also in b). */
friend bool operator<<(const MultiIntBitSet& a, const MultiIntBitSet& b) noexcept
{
for (unsigned i = 0; i < N; ++i) {
if (a.m_val[i] & ~b.m_val[i]) return false;
}
return true;
}
/** Check if bitset a and bitset b are identical. */
friend bool operator==(const MultiIntBitSet& a, const MultiIntBitSet& b) noexcept { return a.m_val == b.m_val; }
/** Check if bitset a and bitset b are different. */
friend bool operator!=(const MultiIntBitSet& a, const MultiIntBitSet& b) noexcept { return a.m_val != b.m_val; }
/** Swap two bitsets. */
friend void swap(MultiIntBitSet& a, MultiIntBitSet& b) noexcept { std::swap(a.m_val, b.m_val); }
};
} // namespace bitset_detail
template<unsigned BITS>
using BitSet = std::conditional_t<(BITS <= 32), bitset_detail::IntBitSet<uint32_t>,
std::conditional_t<(BITS <= std::numeric_limits<size_t>::digits), bitset_detail::IntBitSet<size_t>,
bitset_detail::MultiIntBitSet<size_t, (BITS + std::numeric_limits<size_t>::digits - 1) / std::numeric_limits<size_t>::digits>>>;
#endif // BITCOIN_UTIL_BITSET_H
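A minimal usage sketch for the BitSet type defined above (illustrative only, not part of the patch; it assumes a 64-bit size_t, so BitSet<70> resolves to the two-limb MultiIntBitSet backend):
#include <util/bitset.h>

#include <cassert>

int main()
{
    BitSet<70> a;
    a.Set(3);
    a.Set(65);
    const auto b{BitSet<70>::Fill(4)};      // {0, 1, 2, 3}

    assert(a && b);                         // overlap: both contain 3
    assert(b << BitSet<70>::Fill(10));      // {0..3} is a subset of {0..9}
    assert((a / b)[65] && !(a / b)[3]);     // set subtraction removes 3, keeps 65
    assert(a.First() == 3U && a.Last() == 65U);

    unsigned sum{0};
    for (unsigned pos : a) sum += pos;      // iterates the set positions 3 and 65
    assert(sum == 68U && a.Count() == 2U);
    return 0;
}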

View File

@ -95,6 +95,12 @@ public:
return true;
}
}
bool is_visited(Marker& marker) const EXCLUSIVE_LOCKS_REQUIRED(*this)
{
assert(m_guarded);
return marker.m_marker == m_raw_epoch;
}
};
#define WITH_FRESH_EPOCH(epoch) const Epoch::Guard UNIQUE_NAME(epoch_guard_)(epoch)

View File

@ -610,7 +610,7 @@ private:
* m_conflicts and their descendants. */
CTxMemPool::setEntries m_all_conflicting;
/** All mempool ancestors of this transaction. */
CTxMemPool::setEntries m_ancestors;
CTxMemPool::Entries m_parents;
/** Mempool entry constructed for this transaction. Constructed in PreChecks() but not
* inserted into the mempool until Finalize(). */
std::unique_ptr<CTxMemPoolEntry> m_entry;
@ -657,6 +657,10 @@ private:
// Run checks for mempool replace-by-fee.
bool ReplacementChecks(Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Run cluster size checks (for non-rbf transactions -- RBF is handled
// separately in ReplacementChecks()).
bool ClusterSizeChecks(Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Enforce package mempool ancestor/descendant limits (distinct from individual
// ancestor/descendant limits done in PreChecks).
bool PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
@ -906,79 +910,10 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts);
// Note that these modifications are only applicable to single transaction scenarios;
// carve-outs and package RBF are disabled for multi-transaction evaluations.
CTxMemPool::Limits maybe_rbf_limits = m_pool.m_limits;
// Calculate in-mempool ancestors, up to a limit.
if (ws.m_conflicts.size() == 1) {
// In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
// would meet the chain limits after the conflicts have been removed. However, there isn't a practical
// way to do this short of calculating the ancestor and descendant sets with an overlay cache of
// changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
// very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
// conflicts here. Importantly, we need to ensure that some transactions which were accepted using
// the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
// for off-chain contract systems (see link in the comment below).
//
// Specifically, the subset of RBF transactions which we allow despite chain limits are those which
// conflict directly with exactly one other transaction (but may evict children of said transaction),
// and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
// check is accomplished later, so we don't bother doing anything about it here, but if our
// policy changes, we may need to move that check to here instead of removing it wholesale.
//
// Such transactions are clearly not merging any existing packages, so we are only concerned with
// ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
// not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
// to.
//
// To check these we first check if we meet the RBF criteria, above, and increment the descendant
// limits by the direct conflict and its descendants (as these are recalculated in
// CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
// removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
// the ancestor limits should be the same for both our new transaction and any conflicts).
// We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
// into force here (as we're only adding a single transaction).
assert(ws.m_iters_conflicting.size() == 1);
CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin();
maybe_rbf_limits.descendant_count += 1;
maybe_rbf_limits.descendant_size_vbytes += conflict->GetSizeWithDescendants();
}
auto ancestors{m_pool.CalculateMemPoolAncestors(*entry, maybe_rbf_limits)};
if (!ancestors) {
// If CalculateMemPoolAncestors fails second time, we want the original error string.
// Contracting/payment channels CPFP carve-out:
// If the new transaction is relatively small (up to 40k weight)
// and has at most one ancestor (ie ancestor limit of 2, including
// the new transaction), allow it if its parent has exactly the
// descendant limit descendants.
//
// This allows protocols which rely on distrusting counterparties
// being able to broadcast descendants of an unconfirmed transaction
// to be secure by simply only having two immediately-spendable
// outputs - one for each counterparty. For more info on the uses for
// this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
CTxMemPool::Limits cpfp_carve_out_limits{
.ancestor_count = 2,
.ancestor_size_vbytes = maybe_rbf_limits.ancestor_size_vbytes,
.descendant_count = maybe_rbf_limits.descendant_count + 1,
.descendant_size_vbytes = maybe_rbf_limits.descendant_size_vbytes + EXTRA_DESCENDANT_TX_SIZE_LIMIT,
};
const auto error_message{util::ErrorString(ancestors).original};
if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message);
}
ancestors = m_pool.CalculateMemPoolAncestors(*entry, cpfp_carve_out_limits);
if (!ancestors) return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message);
}
ws.m_ancestors = *ancestors;
// Even though just checking direct mempool parents for inheritance would be sufficient, we
// check using the full ancestor set here because it's more convenient to use what we have
// already calculated.
if (const auto err{SingleV3Checks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) {
if (const auto err{SingleV3Checks(m_pool, ws.m_ptx, ws.m_conflicts, ws.m_vsize)}) {
// Disabled within package validation.
if (err->second != nullptr && args.m_allow_replacement) {
// Potential sibling eviction. Add the sibling to our list of mempool conflicts to be
@ -997,21 +932,38 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "v3-rule-violation", err->first);
}
}
ws.m_parents = m_pool.CalculateParentsOf(*ws.m_ptx);
// A transaction that spends outputs that would be replaced by it is invalid. Now
// that we have the set of all ancestors we can detect this
// pathological case by making sure ws.m_conflicts and ws.m_ancestors don't
// intersect.
if (const auto err_string{EntriesAndTxidsDisjoint(ws.m_ancestors, ws.m_conflicts, hash)}) {
// We classify this as a consensus error because a transaction depending on something it
// conflicts with would be inconsistent.
return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx", *err_string);
// A transaction that spends outputs that would be replaced by it is invalid. Ensure
// that m_conflicts doesn't intersect with the set of ancestors.
if (!ws.m_conflicts.empty()) {
auto ancestors = m_pool.CalculateMemPoolAncestors(*entry);
if (const auto err_string{EntriesAndTxidsDisjoint(ancestors, ws.m_conflicts, hash)}) {
// We classify this as a consensus error because a transaction depending on something it
// conflicts with would be inconsistent.
return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx", *err_string);
}
}
m_rbf = !ws.m_conflicts.empty();
return true;
}
bool MemPoolAccept::ClusterSizeChecks(Workspace& ws)
{
AssertLockHeld(cs_main);
AssertLockHeld(m_pool.cs);
const CTxMemPoolEntry &entry = *ws.m_entry;
TxValidationState& state = ws.m_state;
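// Check that adding this transaction (a single tx of entry.GetTxSize() vbytes) to the
// clusters of its in-mempool parents stays within the configured cluster count/size limits.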
auto result{m_pool.CheckClusterSizeLimit(entry.GetTxSize(), 1, m_pool.m_limits, ws.m_parents)};
if (!result) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-large-cluster", util::ErrorString(result).original);
}
return true;
}
bool MemPoolAccept::ReplacementChecks(Workspace& ws)
{
AssertLockHeld(cs_main);
@ -1022,41 +974,20 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws)
TxValidationState& state = ws.m_state;
CFeeRate newFeeRate(ws.m_modified_fees, ws.m_vsize);
// Enforce Rule #6. The replacement transaction must have a higher feerate than its direct conflicts.
// - The motivation for this check is to ensure that the replacement transaction is preferable for
// block-inclusion, compared to what would be removed from the mempool.
// - This logic predates ancestor feerate-based transaction selection, which is why it doesn't
// consider feerates of descendants.
// - Note: Ancestor feerate-based transaction selection has made this comparison insufficient to
// guarantee that this is incentive-compatible for miners, because it is possible for a
// descendant transaction of a direct conflict to pay a higher feerate than the transaction that
// might replace them, under these rules.
if (const auto err_string{PaysMoreThanConflicts(ws.m_iters_conflicting, newFeeRate, hash)}) {
// Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not
// TX_RECONSIDERABLE, because it cannot be bypassed using package validation.
// This must be changed if package RBF is enabled.
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
strprintf("insufficient fee%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
// Calculate all conflicting entries and enforce Rule #5.
if (const auto err_string{GetEntriesForConflicts(tx, m_pool, ws.m_iters_conflicting, ws.m_all_conflicting)}) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
strprintf("too many potential replacements%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
// Enforce Rule #2.
if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, ws.m_iters_conflicting)}) {
// Sibling eviction is only done for v3 transactions, which cannot have multiple ancestors.
Assume(!ws.m_sibling_eviction);
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
strprintf("replacement-adds-unconfirmed%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
// Check whether it's economically rational to mine this transaction rather than the ones it
// replaces, and whether it pays for its own relay fees. Enforce Rules #3 and #4.
for (CTxMemPool::txiter it : ws.m_all_conflicting) {
ws.m_conflicting_fees += it->GetModifiedFee();
ws.m_conflicting_size += it->GetTxSize();
}
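// Worked example for Rules #3/#4: if the conflicting transactions pay 10,000 sats in total and
// the replacement is 500 vB, then at an incremental relay feerate of 1 sat/vB the replacement
// must pay at least 10,000 + 500 = 10,500 sats in modified fees.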
if (const auto err_string{PaysForRBF(ws.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize,
m_pool.m_incremental_relay_feerate, hash)}) {
// Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not
@ -1065,6 +996,12 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws)
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
strprintf("insufficient fee%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
if (const auto err_string{ImprovesFeerateDiagram(m_pool, ws.m_iters_conflicting, ws.m_all_conflicting, {{&(*ws.m_entry), ws.m_modified_fees}})}) {
// If we can't calculate a feerate, it's because the cluster size limits were hit.
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-failed", err_string->second);
}
return true;
}
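ImprovesFeerateDiagram() is not defined in this hunk; conceptually, it compares the cumulative (size, fee) diagram of the affected mempool chunks before and after the proposed replacement, and rejects the replacement if the new diagram is worse anywhere (the functional tests below expect a "does not improve feerate diagram" rejection). A rough standalone sketch of that kind of comparison, assuming chunks are already in mining order and have positive size -- an illustration of the concept, not the actual implementation:
#include <cstdint>
#include <vector>
struct Chunk { double size; double fee; };
// Cumulative fee of a diagram at a given cumulative size, interpolating linearly within a
// chunk. Chunks are assumed to be in mining order with positive size.
double DiagramFeeAt(const std::vector<Chunk>& chunks, double at_size)
{
    double size = 0, fee = 0;
    for (const auto& c : chunks) {
        if (at_size <= size + c.size) return fee + (at_size - size) * c.fee / c.size;
        size += c.size;
        fee += c.fee;
    }
    return fee; // past the end of the diagram
}
// True if diagram a is at least as high as diagram b everywhere. Both diagrams are piecewise
// linear, so comparing at the union of their chunk boundaries suffices.
bool DiagramDominates(const std::vector<Chunk>& a, const std::vector<Chunk>& b)
{
    std::vector<double> points{0};
    double s = 0;
    for (const auto& c : a) points.push_back(s += c.size);
    s = 0;
    for (const auto& c : b) points.push_back(s += c.size);
    for (double p : points) {
        if (DiagramFeeAt(a, p) < DiagramFeeAt(b, p)) return false;
    }
    return true;
}
int main()
{
    // Old chunks vs. chunks after a proposed replacement.
    std::vector<Chunk> before{{100, 1000}, {200, 600}};
    std::vector<Chunk> after{{150, 2000}, {200, 600}};
    return DiagramDominates(after, before) ? 0 : 1;
}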
@ -1160,6 +1097,7 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
// Remove conflicting transactions from the mempool
CTxMemPool::Entries all_removals;
for (CTxMemPool::txiter it : ws.m_all_conflicting)
{
LogPrint(BCLog::MEMPOOL, "replacing tx %s (wtxid=%s) with %s (wtxid=%s) for %s additional fees, %d delta bytes\n",
@ -1179,10 +1117,11 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
entry->GetFee()
);
ws.m_replaced_transactions.push_back(it->GetSharedTx());
all_removals.push_back(it);
}
m_pool.RemoveStaged(ws.m_all_conflicting, false, MemPoolRemovalReason::REPLACED);
m_pool.RemoveStaged(all_removals, MemPoolRemovalReason::REPLACED);
// Store transaction in memory
m_pool.addUnchecked(*entry, ws.m_ancestors);
m_pool.addUnchecked(*entry);
// trim mempool and check if tx was trimmed
// If we are validating a package, don't trim here because we could evict a previous transaction
@ -1224,21 +1163,6 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
ws.m_ptx->GetHash().ToString()));
}
// Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the
// last calculation done in PreChecks, since package ancestors have already been submitted.
{
auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_pool.m_limits)};
if(!ancestors) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
Assume(false);
all_submitted = false;
package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
ws.m_ptx->GetHash().ToString()));
}
ws.m_ancestors = std::move(ancestors).value_or(ws.m_ancestors);
}
// If we call LimitMempoolSize() for each individual Finalize(), the mempool will not take
// the transaction's descendant feerate into account because it hasn't seen them yet. Also,
// we risk evicting a transaction that a subsequent package transaction depends on. Instead,
@ -1304,6 +1228,8 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef
if (m_rbf && !ReplacementChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state);
if (!m_rbf && !ClusterSizeChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state);
// Perform the inexpensive checks first and avoid hashing and signature verification unless
// those checks pass, to mitigate CPU exhaustion denial-of-service attacks.
if (!PolicyScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
@ -1385,7 +1311,8 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
// At this point we have all in-mempool ancestors, and we know every transaction's vsize.
// Run the v3 checks on the package.
for (Workspace& ws : workspaces) {
if (auto err{PackageV3Checks(ws.m_ptx, ws.m_vsize, txns, ws.m_ancestors)}) {
CTxMemPool::Entries parents = m_pool.CalculateParentsOf(*ws.m_ptx);
if (auto err{PackageV3Checks(m_pool, ws.m_ptx, ws.m_vsize, txns, parents)}) {
package_state.Invalid(PackageValidationResult::PCKG_POLICY, "v3-violation", err.value());
return PackageMempoolAcceptResult(package_state, {});
}

View File

@ -29,6 +29,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
self.extra_args = [
[
"-maxorphantx=1000",
"-limitclustercount=200",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
@ -58,14 +59,12 @@ class ReplaceByFeeTest(BitcoinTestFramework):
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
# TODO: rework too many replacements test to use direct conflicts only
#self.log.info("Running test too many replacements...")
#self.test_too_many_replacements()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test too many replacements using default mempool params...")
self.test_too_many_replacements_with_default_mempool_params()
#self.log.info("Running test too many replacements using default mempool params...")
#self.test_too_many_replacements_with_default_mempool_params()
self.log.info("Running test opt-in...")
self.test_opt_in()
@ -236,6 +235,8 @@ class ReplaceByFeeTest(BitcoinTestFramework):
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
# TODO: rework using direct conflict test
'''
for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2):
fee = int(0.00001 * COIN)
tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
@ -252,6 +253,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
for txid in tree_txs:
self.nodes[0].getrawtransaction(txid)
'''
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
@ -274,7 +276,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
)["hex"]
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
assert_raises_rpc_error(-26, "does not improve feerate diagram", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
@ -315,27 +317,6 @@ class ReplaceByFeeTest(BitcoinTestFramework):
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = self.make_utxo(self.nodes[0], int(1.1 * COIN))
unconfirmed_utxo = self.make_utxo(self.nodes[0], int(0.1 * COIN), confirmed=False)
self.wallet.send_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=confirmed_utxo,
sequence=0,
fee=Decimal("0.1"),
)
tx2_hex = self.wallet.create_self_transfer_multi(
utxos_to_spend=[confirmed_utxo, unconfirmed_utxo],
sequence=0,
amount_per_output=1 * COIN,
)["hex"]
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
@ -566,7 +547,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
)["hex"]
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
assert_raises_rpc_error(-26, "does not improve feerate diagram", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1 * COIN))

View File

@ -3,11 +3,8 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import os
import struct
import tempfile
from time import sleep
from io import BytesIO
from test_framework.address import (
ADDRESS_BCRT1_P2WSH_OP_TRUE,
@ -20,7 +17,6 @@ from test_framework.blocktools import (
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import (
CBlock,
hash256,
tx_from_hex,
)
@ -32,7 +28,7 @@ from test_framework.util import (
from test_framework.wallet import (
MiniWallet,
)
from test_framework.netutil import test_ipv6_local, test_unix_socket
from test_framework.netutil import test_ipv6_local
# Test may be skipped and not have zmq installed
@ -108,8 +104,9 @@ class ZMQTestSetupBlock:
class ZMQTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# whitelist peers to speed up tx relay / mempool sync
self.noban_tx_relay = True
# This test isn't testing txn relay/timing, so set whitelist on the
# peers for instant txn relay. This speeds up the test run time 2-3x.
self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
self.zmq_port_base = p2p_port(self.num_nodes + 1)
def skip_test_if_missing_module(self):
@ -121,10 +118,6 @@ class ZMQTest (BitcoinTestFramework):
self.ctx = zmq.Context()
try:
self.test_basic()
if test_unix_socket():
self.test_basic(unix=True)
else:
self.log.info("Skipping ipc test, because UNIX sockets are not supported.")
self.test_sequence()
self.test_mempool_sync()
self.test_reorg()
@ -145,7 +138,8 @@ class ZMQTest (BitcoinTestFramework):
socket.setsockopt(zmq.IPV6, 1)
subscribers.append(ZMQSubscriber(socket, topic.encode()))
self.restart_node(0, [f"-zmqpub{topic}={address.replace('ipc://', 'unix:')}" for topic, address in services])
self.restart_node(0, [f"-zmqpub{topic}={address}" for topic, address in services] +
self.extra_args[0])
for i, sub in enumerate(subscribers):
sub.socket.connect(services[i][1])
@ -182,19 +176,12 @@ class ZMQTest (BitcoinTestFramework):
return subscribers
def test_basic(self, unix = False):
self.log.info(f"Running basic test with {'ipc' if unix else 'tcp'} protocol")
def test_basic(self):
# Invalid zmq arguments don't take down the node, see #17185.
self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"])
address = f"tcp://127.0.0.1:{self.zmq_port_base}"
if unix:
# Use the shortest temp path possible since paths may have as little as 92-char limit
socket_path = tempfile.NamedTemporaryFile().name
address = f"ipc://{socket_path}"
subs = self.setup_zmq_test([(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]])
hashblock = subs[0]
@ -216,13 +203,8 @@ class ZMQTest (BitcoinTestFramework):
assert_equal(tx.hash, txid.hex())
# Should receive the generated raw block.
hex = rawblock.receive()
block = CBlock()
block.deserialize(BytesIO(hex))
assert block.is_valid()
assert_equal(block.vtx[0].hash, tx.hash)
assert_equal(len(block.vtx), 1)
assert_equal(genhashes[x], hash256_reversed(hex[:80]).hex())
block = rawblock.receive()
assert_equal(genhashes[x], hash256_reversed(block[:80]).hex())
# Should receive the generated block hash.
hash = hashblock.receive().hex()
@ -260,8 +242,6 @@ class ZMQTest (BitcoinTestFramework):
])
assert_equal(self.nodes[1].getzmqnotifications(), [])
if unix:
os.unlink(socket_path)
def test_reorg(self):
@ -446,14 +426,12 @@ class ZMQTest (BitcoinTestFramework):
mempool_seq += 1
assert_equal((bump_txid, "A", mempool_seq), seq.receive_sequence())
mempool_seq += 1
mempool_seq += len(more_tx)
# Conflict announced first, then block
assert_equal((bump_txid, "R", mempool_seq), seq.receive_sequence())
mempool_seq += 1
assert_equal((tip, "C", None), seq.receive_sequence())
mempool_seq += len(more_tx)
# Last tx
assert_equal((orig_txid_2, "A", mempool_seq), seq.receive_sequence())
mempool_seq += 1
assert_equal((orig_txid_2, "A", mempool_seq), seq.receive_sequence())
self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all() # want to make sure we didn't break "consensus" for other tests

View File

@ -192,31 +192,31 @@ class MempoolAcceptV3(BitcoinTestFramework):
node.reconsiderblock(block[0])
@cleanup(extra_args=["-limitdescendantsize=10", "-datacarriersize=40000", "-acceptnonstdtxn=1"])
@cleanup(extra_args=["-limitclustercount=1", "-datacarriersize=40000", "-acceptnonstdtxn=1"])
def test_nondefault_package_limits(self):
"""
Max standard tx size + v3 rules imply the ancestor/descendant rules (at their default
Max standard tx size + v3 rules imply the cluster rules (at their default
values), but those checks must not be skipped. Ensure both sets of checks are done by
changing the ancestor/descendant limit configurations.
changing the cluster limit configurations.
"""
node = self.nodes[0]
self.log.info("Test that a decreased limitdescendantsize also applies to v3 child")
self.log.info("Test that a decreased cluster size limit also applies to v3 child")
tx_v3_parent_large1 = self.wallet.send_self_transfer(from_node=node, target_weight=99900, version=3)
tx_v3_child_large1 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent_large1["new_utxo"], version=3)
# Child is within v3 limits, but parent's descendant limit is exceeded
# Child is within v3 limits, but cluster limit is exceeded
assert_greater_than(1000, tx_v3_child_large1["tx"].get_vsize())
assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds descendant size limit for tx {tx_v3_parent_large1['txid']}", node.sendrawtransaction, tx_v3_child_large1["hex"])
assert_raises_rpc_error(-26, f"too-large-cluster, too many unconfirmed transactions in the cluster [limit: 1]", node.sendrawtransaction, tx_v3_child_large1["hex"])
self.check_mempool([tx_v3_parent_large1["txid"]])
assert_equal(node.getmempoolentry(tx_v3_parent_large1["txid"])["descendantcount"], 1)
self.generate(node, 1)
self.log.info("Test that a decreased limitancestorsize also applies to v3 parent")
self.restart_node(0, extra_args=["-limitancestorsize=10", "-datacarriersize=40000", "-acceptnonstdtxn=1"])
tx_v3_parent_large2 = self.wallet.send_self_transfer(from_node=node, target_weight=99900, version=3)
self.log.info("Test that a decreased limitclustersize also applies to v3 parent")
self.restart_node(0, extra_args=["-limitclustersize=10", "-datacarriersize=40000", "-acceptnonstdtxn=1"])
tx_v3_parent_large2 = self.wallet.send_self_transfer(from_node=node, target_weight=39900, version=3)
tx_v3_child_large2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent_large2["new_utxo"], version=3)
# Child is within v3 limits
assert_greater_than_or_equal(1000, tx_v3_child_large2["tx"].get_vsize())
assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds ancestor size limit", node.sendrawtransaction, tx_v3_child_large2["hex"])
assert_raises_rpc_error(-26, f"too-large-cluster, exceeds cluster size limit", node.sendrawtransaction, tx_v3_child_large2["hex"])
self.check_mempool([tx_v3_parent_large2["txid"]])
@cleanup(extra_args=["-datacarriersize=1000", "-acceptnonstdtxn=1"])
@ -483,7 +483,7 @@ class MempoolAcceptV3(BitcoinTestFramework):
tx_v3_child_2_rule6 = self.wallet.create_self_transfer(
utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=DEFAULT_FEE, version=3
)
rule6_str = f"insufficient fee (including sibling eviction), rejecting replacement {tx_v3_child_2_rule6['txid']}; new feerate"
rule6_str = f"insufficient fee (including sibling eviction), rejecting replacement {tx_v3_child_2_rule6['txid']}"
assert_raises_rpc_error(-26, rule6_str, node.sendrawtransaction, tx_v3_child_2_rule6["hex"])
self.check_mempool([tx_v3_parent['txid'], tx_v3_child_1['txid']])
@ -515,10 +515,11 @@ class MempoolAcceptV3(BitcoinTestFramework):
# Override maxfeerate - it costs a lot to replace these 100 transactions.
assert node.testmempoolaccept([tx_v3_replacement_only["hex"]], maxfeerate=0)[0]["allowed"]
# Adding another one exceeds the limit.
utxos_for_conflict.append(tx_v3_parent["new_utxos"][1])
tx_v3_child_2_rule5 = self.wallet.create_self_transfer_multi(utxos_to_spend=utxos_for_conflict, fee_per_output=4000000, version=3)
rule5_str = f"too many potential replacements (including sibling eviction), rejecting replacement {tx_v3_child_2_rule5['txid']}; too many potential replacements (101 > 100)"
assert_raises_rpc_error(-26, rule5_str, node.sendrawtransaction, tx_v3_child_2_rule5["hex"])
# TODO: rewrite this test given the new RBF rules
#utxos_for_conflict.append(tx_v3_parent["new_utxos"][1])
#tx_v3_child_2_rule5 = self.wallet.create_self_transfer_multi(utxos_to_spend=utxos_for_conflict, fee_per_output=4000000, version=3)
#rule5_str = f"too many potential replacements (including sibling eviction), rejecting replacement {tx_v3_child_2_rule5['txid']}; too many potential replacements (101 > 100)"
#assert_raises_rpc_error(-26, rule5_str, node.sendrawtransaction, tx_v3_child_2_rule5["hex"])
self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_1["txid"]])
self.log.info("Test sibling eviction is successful if it meets all RBF rules")
@ -536,7 +537,7 @@ class MempoolAcceptV3(BitcoinTestFramework):
fee_to_beat_child2 = int(tx_v3_child_2["fee"] * COIN)
tx_v3_child_3 = self.wallet.create_self_transfer_multi(
utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat_child2*5, version=3
utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat_child2*10, version=3
)
node.sendrawtransaction(tx_v3_child_3["hex"])
self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_3["txid"]])

View File

@ -0,0 +1,64 @@
#!/usr/bin/env python3
# Copyright (c) 2024 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test cluster mempool accessors and limits"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.wallet import (
MiniWallet,
)
from test_framework.util import (
assert_raises_rpc_error,
)
MAX_CLUSTER_COUNT = 100
class MempoolClusterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
self.wallet = MiniWallet(node)
parent_tx = self.wallet.send_self_transfer(from_node=node)
utxo_to_spend = parent_tx["new_utxo"]
ancestors = [parent_tx["txid"]]
while len(node.getrawmempool()) < MAX_CLUSTER_COUNT:
next_tx = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=utxo_to_spend)
# Confirm that each transaction is in the same cluster as the first.
assert node.getmempoolentry(next_tx['txid'])['clusterid'] == node.getmempoolentry(parent_tx['txid'])['clusterid']
# Confirm that there is only one cluster in the mempool.
assert node.getmempoolinfo()['numberofclusters'] == 1
# Confirm that the ancestors are what we expect
mempool_ancestors = node.getmempoolancestors(next_tx['txid'])
assert sorted(mempool_ancestors) == sorted(ancestors)
# Confirm that each successive transaction is added as a descendant.
assert all([ next_tx["txid"] in node.getmempooldescendants(x) for x in ancestors ])
# Update for next iteration
ancestors.append(next_tx["txid"])
utxo_to_spend = next_tx["new_utxo"]
assert node.getmempoolinfo()['numberofclusters'] == 1
assert node.getmempoolinfo()['maxclustercount'] == MAX_CLUSTER_COUNT
assert node.getmempoolcluster(node.getmempoolentry(parent_tx['txid'])['clusterid'])['txcount'] == MAX_CLUSTER_COUNT
feeratediagram = node.getmempoolfeeratediagram()
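# Sanity-check the diagram: each chunk must have a positive size; successive chunk feerates
# are compared by cross-multiplication to avoid floating-point division.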
last_val = [0, 0]
for x in feeratediagram:
assert x['size'] > 0
assert last_val[0]*x['fee'] >= last_val[1]*x['size']
last_val = [x['size'], x['fee']]
# Test that adding one more transaction to the cluster will fail.
bad_tx = self.wallet.create_self_transfer(utxo_to_spend=utxo_to_spend)
assert_raises_rpc_error(-26, "too-large-cluster", node.sendrawtransaction, bad_tx["hex"])
# TODO: verify that the size limits are also enforced.
if __name__ == '__main__':
MempoolClusterTest().main()

View File

@ -43,7 +43,7 @@ class MempoolLimitTest(BitcoinTestFramework):
# B: First transaction in package, RBFs A by itself under individual evaluation, which would give it +1 descendant limit
# C: Second transaction in package, spends B. If the +1 descendant limit persisted, would make it into mempool
self.restart_node(0, extra_args=self.extra_args[0] + ["-limitancestorcount=2", "-limitdescendantcount=1"])
self.restart_node(0, extra_args=self.extra_args[0] + ["-limitclustercount=1"])
# Generate a confirmed utxo we will double-spend
rbf_utxo = self.wallet.send_self_transfer(
@ -81,7 +81,7 @@ class MempoolLimitTest(BitcoinTestFramework):
)
res = node.submitpackage([tx_B["hex"], tx_C["hex"]])
assert_equal(res["package_msg"], "transaction failed")
assert "too-long-mempool-chain" in res["tx-results"][tx_C["wtxid"]]["error"]
assert "too-large-cluster" in res["tx-results"][tx_C["wtxid"]]["error"]
def test_mid_package_eviction(self):
node = self.nodes[0]

View File

@ -42,6 +42,7 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-limitclustercount=25"]]
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
@ -51,13 +52,9 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
self.test_chain_limits()
self.test_desc_count_limits()
self.test_desc_count_limits_2()
self.test_anc_count_limits()
self.test_anc_count_limits_2()
self.test_anc_count_limits_bushy()
# The node will accept (nonstandard) extra large OP_RETURN outputs
self.restart_node(0, extra_args=["-datacarriersize=100000"])
self.test_anc_size_limits()
self.test_desc_size_limits()
@check_package_limits
@ -168,145 +165,6 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
assert_equal(2, len(package_hex))
return package_hex
@check_package_limits
def test_anc_count_limits(self):
"""Create a 'V' shaped chain with 24 transactions in the mempool and 3 in the package:
M1a M1b
^ ^
M2a M2b
. .
. .
. .
M12a M12b
^ ^
Pa Pb
^ ^
Pc
The lowest descendant, Pc, exceeds ancestor limits, but only if the in-mempool
and in-package ancestors are all considered together.
"""
node = self.nodes[0]
package_hex = []
pc_parent_utxos = []
self.log.info("Check that in-mempool and in-package ancestors are calculated properly in packages")
# Two chains of 13 transactions each
for _ in range(2):
chain_tip_utxo = self.wallet.send_self_transfer_chain(from_node=node, chain_length=12)[-1]["new_utxo"]
# Save the 13th transaction for the package
tx = self.wallet.create_self_transfer(utxo_to_spend=chain_tip_utxo)
package_hex.append(tx["hex"])
pc_parent_utxos.append(tx["new_utxo"])
# Child Pc
pc_hex = self.wallet.create_self_transfer_multi(utxos_to_spend=pc_parent_utxos)["hex"]
package_hex.append(pc_hex)
assert_equal(24, node.getmempoolinfo()["size"])
assert_equal(3, len(package_hex))
return package_hex
@check_package_limits
def test_anc_count_limits_2(self):
"""Create a 'Y' shaped chain with 24 transactions in the mempool and 2 in the package:
M1a M1b
^ ^
M2a M2b
. .
. .
. .
M12a M12b
^ ^
Pc
^
Pd
The lowest descendant, Pd, exceeds ancestor limits, but only if the in-mempool
and in-package ancestors are all considered together.
"""
node = self.nodes[0]
pc_parent_utxos = []
self.log.info("Check that in-mempool and in-package ancestors are calculated properly in packages")
# Two chains of 12 transactions each
for _ in range(2):
chaintip_utxo = self.wallet.send_self_transfer_chain(from_node=node, chain_length=12)[-1]["new_utxo"]
# last 2 transactions will be the parents of Pc
pc_parent_utxos.append(chaintip_utxo)
# Child Pc
pc_tx = self.wallet.create_self_transfer_multi(utxos_to_spend=pc_parent_utxos)
# Child Pd
pd_tx = self.wallet.create_self_transfer(utxo_to_spend=pc_tx["new_utxos"][0])
assert_equal(24, node.getmempoolinfo()["size"])
return [pc_tx["hex"], pd_tx["hex"]]
@check_package_limits
def test_anc_count_limits_bushy(self):
"""Create a tree with 20 transactions in the mempool and 6 in the package:
M1...M4 M5...M8 M9...M12 M13...M16 M17...M20
^ ^ ^ ^ ^ (each with 4 parents)
P0 P1 P2 P3 P4
^ ^ ^ ^ ^ (5 parents)
PC
Where M(4i+1)...M+(4i+4) are the parents of Pi and P0, P1, P2, P3, and P4 are the parents of PC.
P0... P4 individually only have 4 parents each, and PC has no in-mempool parents. But
combined, PC has 25 in-mempool and in-package parents.
"""
node = self.nodes[0]
package_hex = []
pc_parent_utxos = []
for _ in range(5): # Make package transactions P0 ... P4
pc_grandparent_utxos = []
for _ in range(4): # Make mempool transactions M(4i+1)...M(4i+4)
pc_grandparent_utxos.append(self.wallet.send_self_transfer(from_node=node)["new_utxo"])
# Package transaction Pi
pi_tx = self.wallet.create_self_transfer_multi(utxos_to_spend=pc_grandparent_utxos)
package_hex.append(pi_tx["hex"])
pc_parent_utxos.append(pi_tx["new_utxos"][0])
# Package transaction PC
pc_hex = self.wallet.create_self_transfer_multi(utxos_to_spend=pc_parent_utxos)["hex"]
package_hex.append(pc_hex)
assert_equal(20, node.getmempoolinfo()["size"])
assert_equal(6, len(package_hex))
return package_hex
@check_package_limits
def test_anc_size_limits(self):
"""Test Case with 2 independent transactions in the mempool and a parent + child in the
package, where the package parent is the child of both mempool transactions (30KvB each):
A B
^ ^
C
^
D
The lowest descendant, D, exceeds ancestor size limits, but only if the in-mempool
and in-package ancestors are all considered together.
"""
node = self.nodes[0]
parent_utxos = []
target_vsize = 30_000
high_fee = 10 * target_vsize # 10 sats/vB
target_weight = target_vsize * WITNESS_SCALE_FACTOR
self.log.info("Check that in-mempool and in-package ancestor size limits are calculated properly in packages")
# Mempool transactions A and B
for _ in range(2):
bulked_tx = self.wallet.create_self_transfer(target_weight=target_weight)
self.wallet.sendrawtransaction(from_node=node, tx_hex=bulked_tx["hex"])
parent_utxos.append(bulked_tx["new_utxo"])
# Package transaction C
pc_tx = self.wallet.create_self_transfer_multi(utxos_to_spend=parent_utxos, fee_per_output=high_fee, target_weight=target_weight)
# Package transaction D
pd_tx = self.wallet.create_self_transfer(utxo_to_spend=pc_tx["new_utxos"][0], target_weight=target_weight)
assert_equal(2, node.getmempoolinfo()["size"])
return [pc_tx["hex"], pd_tx["hex"]]
@check_package_limits
def test_desc_size_limits(self):
"""Create 3 mempool transactions and 2 package transactions (21KvB each):

View File

@ -1,72 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2014-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking carve-out allowing one final transaction in
an otherwise-full package as long as it has only one parent and is <= 10k in
size.
"""
from test_framework.messages import (
DEFAULT_ANCESTOR_LIMIT,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet import MiniWallet
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-maxorphantx=1000"]]
def chain_tx(self, utxos_to_spend, *, num_outputs=1):
return self.wallet.send_self_transfer_multi(
from_node=self.nodes[0],
utxos_to_spend=utxos_to_spend,
num_outputs=num_outputs)['new_utxos']
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
# DEFAULT_ANCESTOR_LIMIT transactions off a confirmed tx should be fine
chain = []
utxo = self.wallet.get_utxo()
for _ in range(4):
utxo, utxo2 = self.chain_tx([utxo], num_outputs=2)
chain.append(utxo2)
for _ in range(DEFAULT_ANCESTOR_LIMIT - 4):
utxo, = self.chain_tx([utxo])
chain.append(utxo)
second_chain, = self.chain_tx([self.wallet.get_utxo()])
# Check mempool has DEFAULT_ANCESTOR_LIMIT + 1 transactions in it
assert_equal(len(self.nodes[0].getrawmempool()), DEFAULT_ANCESTOR_LIMIT + 1)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many unconfirmed ancestors [limit: 25]", self.chain_tx, [utxo])
# ...even if it chains on from some point in the middle of the chain.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_tx, [chain[2]])
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_tx, [chain[1]])
# ...even if it chains on to two parent transactions with one in the chain.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_tx, [chain[0], second_chain])
# ...especially if it's > 40k weight
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_tx, [chain[0]], num_outputs=350)
# But not if it chains directly off the first transaction
replacable_tx = self.wallet.send_self_transfer_multi(from_node=self.nodes[0], utxos_to_spend=[chain[0]])['tx']
# and the second chain should work just fine
self.chain_tx([second_chain])
# Make sure we can RBF the chain which used our carve-out rule
replacable_tx.vout[0].nValue -= 1000000
self.nodes[0].sendrawtransaction(replacable_tx.serialize().hex())
# Finally, check that we added two transactions
assert_equal(len(self.nodes[0].getrawmempool()), DEFAULT_ANCESTOR_LIMIT + 3)
if __name__ == '__main__':
MempoolPackagesTest().main()

View File

@ -4,25 +4,17 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking code."""
from decimal import Decimal
from test_framework.messages import (
DEFAULT_ANCESTOR_LIMIT,
DEFAULT_DESCENDANT_LIMIT,
)
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet import MiniWallet
# custom limits for node1
CUSTOM_ANCESTOR_LIMIT = 5
CUSTOM_DESCENDANT_LIMIT = 10
assert CUSTOM_DESCENDANT_LIMIT >= CUSTOM_ANCESTOR_LIMIT
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
@ -35,8 +27,8 @@ class MempoolPackagesTest(BitcoinTestFramework):
],
[
"-maxorphantx=1000",
"-limitancestorcount={}".format(CUSTOM_ANCESTOR_LIMIT),
"-limitdescendantcount={}".format(CUSTOM_DESCENDANT_LIMIT),
"-limitclustercount={}".format(CUSTOM_DESCENDANT_LIMIT),
],
]
@ -44,169 +36,6 @@ class MempoolPackagesTest(BitcoinTestFramework):
self.wallet = MiniWallet(self.nodes[0])
self.wallet.rescan_utxos()
peer_inv_store = self.nodes[0].add_p2p_connection(P2PTxInvStore()) # keep track of invs
# DEFAULT_ANCESTOR_LIMIT transactions off a confirmed tx should be fine
chain = self.wallet.create_self_transfer_chain(chain_length=DEFAULT_ANCESTOR_LIMIT)
witness_chain = [t["wtxid"] for t in chain]
ancestor_vsize = 0
ancestor_fees = Decimal(0)
for i, t in enumerate(chain):
ancestor_vsize += t["tx"].get_vsize()
ancestor_fees += t["fee"]
self.wallet.sendrawtransaction(from_node=self.nodes[0], tx_hex=t["hex"])
# Wait until mempool transactions have passed initial broadcast (sent inv and received getdata)
# Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between
peer_inv_store.wait_for_broadcast(witness_chain)
# Check mempool has DEFAULT_ANCESTOR_LIMIT transactions in it, and descendant and ancestor
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), DEFAULT_ANCESTOR_LIMIT)
descendant_count = 1
descendant_fees = 0
descendant_vsize = 0
assert_equal(ancestor_vsize, sum([mempool[tx]['vsize'] for tx in mempool]))
ancestor_count = DEFAULT_ANCESTOR_LIMIT
assert_equal(ancestor_fees, sum([mempool[tx]['fees']['base'] for tx in mempool]))
# Adding one more transaction on to the chain should fail.
next_hop = self.wallet.create_self_transfer(utxo_to_spend=chain[-1]["new_utxo"])["hex"]
assert_raises_rpc_error(-26, "too-long-mempool-chain", lambda: self.nodes[0].sendrawtransaction(next_hop))
descendants = []
ancestors = [t["txid"] for t in chain]
chain = [t["txid"] for t in chain]
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that gettxspendingprevout is consistent with getrawmempool
witnesstx = self.nodes[0].getrawtransaction(txid=x, verbose=True)
for tx_in in witnesstx["vin"]:
spending_result = self.nodes[0].gettxspendingprevout([ {'txid' : tx_in["txid"], 'vout' : tx_in["vout"]} ])
assert_equal(spending_result, [ {'txid' : tx_in["txid"], 'vout' : tx_in["vout"], 'spendingtxid' : x} ])
# Check that the descendant calculations are correct
assert_equal(entry['descendantcount'], descendant_count)
descendant_fees += entry['fees']['base']
assert_equal(entry['fees']['modified'], entry['fees']['base'])
assert_equal(entry['fees']['descendant'], descendant_fees)
descendant_vsize += entry['vsize']
assert_equal(entry['descendantsize'], descendant_vsize)
descendant_count += 1
# Check that ancestor calculations are correct
assert_equal(entry['ancestorcount'], ancestor_count)
assert_equal(entry['fees']['ancestor'], ancestor_fees)
assert_equal(entry['ancestorsize'], ancestor_vsize)
ancestor_vsize -= entry['vsize']
ancestor_fees -= entry['fees']['base']
ancestor_count -= 1
# Check that parent/child list is correct
assert_equal(entry['spentby'], descendants[-1:])
assert_equal(entry['depends'], ancestors[-2:-1])
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
# Check getmempooldescendants verbose output is correct
for descendant, dinfo in self.nodes[0].getmempooldescendants(x, True).items():
assert_equal(dinfo['depends'], [chain[chain.index(descendant)-1]])
if dinfo['descendantcount'] > 1:
assert_equal(dinfo['spentby'], [chain[chain.index(descendant)+1]])
else:
assert_equal(dinfo['spentby'], [])
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors verbose output is correct
for ancestor, ainfo in self.nodes[0].getmempoolancestors(x, True).items():
assert_equal(ainfo['spentby'], [chain[chain.index(ancestor)+1]])
if ainfo['ancestorcount'] > 1:
assert_equal(ainfo['depends'], [chain[chain.index(ancestor)-1]])
else:
assert_equal(ainfo['depends'], [])
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert chain[-1] not in v_ancestors.keys()
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert chain[0] not in v_descendants.keys()
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
ancestor_fees = 0
for x in chain:
entry = self.nodes[0].getmempoolentry(x)
ancestor_fees += entry['fees']['base']
assert_equal(entry['fees']['ancestor'], ancestor_fees + Decimal('0.00001'))
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
descendant_fees = 0
for x in reversed(chain):
entry = self.nodes[0].getmempoolentry(x)
descendant_fees += entry['fees']['base']
assert_equal(entry['fees']['descendant'], descendant_fees + Decimal('0.00001'))
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.generate(self.nodes[0], 1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
descendant_fees = 0
for x in reversed(chain):
entry = self.nodes[0].getmempoolentry(x)
descendant_fees += entry['fees']['base']
if (x == chain[-1]):
assert_equal(entry['fees']['modified'], entry['fees']['base'] + Decimal("0.00002"))
assert_equal(entry['fees']['descendant'], descendant_fees + Decimal("0.00002"))
# Check that node1's mempool is as expected (-> custom ancestor limit)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert_equal(len(mempool1), CUSTOM_ANCESTOR_LIMIT)
assert set(mempool1).issubset(set(mempool0))
for tx in chain[:CUSTOM_ANCESTOR_LIMIT]:
assert tx in mempool1
# TODO: more detailed check of node1's mempool (fees etc.)
# check transaction unbroadcast info (should be false if in both mempools)
mempool = self.nodes[0].getrawmempool(True)
for tx in mempool:
assert_equal(mempool[tx]['unbroadcast'], False)
# TODO: test ancestor size limits
# Now test descendant chain limits
tx_children = []
@ -233,24 +62,18 @@ class MempoolPackagesTest(BitcoinTestFramework):
for child in tx_children:
assert_equal(mempool[child]['depends'], [parent_transaction])
# Sending one more chained transaction will fail
next_hop = self.wallet.create_self_transfer(utxo_to_spend=transaction_package.pop(0))["hex"]
assert_raises_rpc_error(-26, "too-long-mempool-chain", lambda: self.nodes[0].sendrawtransaction(next_hop))
# Check that node1's mempool is as expected, containing:
# - txs from previous ancestor test (-> custom ancestor limit)
# - parent tx for descendant test
# - txs chained off parent tx (-> custom descendant limit)
self.wait_until(lambda: len(self.nodes[1].getrawmempool()) ==
CUSTOM_ANCESTOR_LIMIT + 1 + CUSTOM_DESCENDANT_LIMIT, timeout=10)
self.wait_until(lambda: len(self.nodes[1].getrawmempool()) == CUSTOM_DESCENDANT_LIMIT, timeout=10)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert set(mempool1).issubset(set(mempool0))
assert parent_transaction in mempool1
for tx in chain[:CUSTOM_DESCENDANT_LIMIT]:
assert tx in mempool1
for tx in chain[CUSTOM_DESCENDANT_LIMIT:]:
assert tx not in mempool1
for tx in chain:
if tx in mempool1:
entry = self.nodes[1].getmempoolentry(tx)
assert entry["descendantcount"] <= CUSTOM_DESCENDANT_LIMIT
# TODO: more detailed check of node1's mempool (fees etc.)
# TODO: test descendant size limits

View File

@ -97,6 +97,9 @@ class MempoolPersistTest(BitcoinTestFramework):
assert_equal(total_fee_old, sum(v['fees']['base'] for k, v in self.nodes[0].getrawmempool(verbose=True).items()))
last_entry = self.nodes[0].getmempoolentry(txid=last_txid)
del last_entry["fees"]["chunk"]
del last_entry["clusterid"]
del last_entry["chunksize"]
tx_creation_time = last_entry['time']
assert_greater_than_or_equal(tx_creation_time, tx_creation_time_lower)
assert_greater_than_or_equal(tx_creation_time_higher, tx_creation_time)
@ -131,7 +134,11 @@ class MempoolPersistTest(BitcoinTestFramework):
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
self.log.debug('Verify all fields are loaded correctly')
assert_equal(last_entry, self.nodes[0].getmempoolentry(txid=last_txid))
new_entry = self.nodes[0].getmempoolentry(txid=last_txid)
del new_entry["fees"]["chunk"]
del new_entry["clusterid"]
del new_entry["chunksize"]
assert_equal(last_entry, new_entry)
self.nodes[0].sendrawtransaction(tx_prioritised_not_submitted['hex'])
entry_prioritised_before_restart = self.nodes[0].getmempoolentry(txid=tx_prioritised_not_submitted['txid'])
assert_equal(entry_prioritised_before_restart['fees']['base'] + Decimal('0.00009999'), entry_prioritised_before_restart['fees']['modified'])

View File

@ -163,14 +163,17 @@ class BytesPerSigOpTest(BitcoinTestFramework):
assert_equal(parent_individual_testres["vsize"], max_multisig_vsize)
# But together, it's exceeding limits in the *package* context. If sigops adjusted vsize wasn't being checked
# here, it would get further in validation and give too-long-mempool-chain error instead.
# here, it would get further in validation and give too-large-cluster error instead.
packet_test = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex(), tx_child.serialize().hex()])
expected_package_error = f"package-mempool-limits, package size {2*max_multisig_vsize} exceeds ancestor size limit [limit: 101000]"
expected_package_error = f"package-mempool-limits, exceeds cluster size limit [limit: 101000]"
assert_equal([x["package-error"] for x in packet_test], [expected_package_error] * 2)
# When we actually try to submit, the parent makes it into the mempool, but the child would exceed ancestor vsize limits
res = self.nodes[0].submitpackage([tx_parent.serialize().hex(), tx_child.serialize().hex()])
assert "too-long-mempool-chain" in res["tx-results"][tx_child.getwtxid()]["error"]
assert "too-large-cluster" in res["tx-results"][tx_child.getwtxid()]["error"]
# When we actually try to submit, the parent makes it into the mempool, but the child would exceed cluster vsize limits
#assert_raises_rpc_error(-26, "too-large-cluster", self.nodes[0].submitpackage, [tx_parent.serialize().hex(), tx_child.serialize().hex()])
assert tx_parent.rehash() in self.nodes[0].getrawmempool()
# Transactions are tiny in weight

View File

@ -18,7 +18,7 @@ from test_framework.wallet import MiniWallet
class MempoolUpdateFromBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000', '-limitancestorcount=100']]
self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000', '-limitancestorcount=100', "-limitclustersize=1000"]]
def transaction_graph_test(self, size, n_tx_to_mine=None, fee=100_000):
"""Create an acyclic tournament (a type of directed graph) of transactions and use it for testing.

View File

@ -109,6 +109,14 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
raw_before[txid_c]["fees"]["descendant"] += fee_delta_c_1 + fee_delta_c_2
raw_before[txid_d]["fees"]["ancestor"] += fee_delta_b + fee_delta_c_1 + fee_delta_c_2
raw_after = self.nodes[0].getrawmempool(verbose=True)
for txid in [txid_a, txid_b, txid_c, txid_d]:
del raw_before[txid]["fees"]["chunk"]
del raw_before[txid]["chunksize"]
del raw_before[txid]["clusterid"]
for txid in [txid_a, txid_b, txid_c, txid_d]:
del raw_after[txid]["fees"]["chunk"]
del raw_after[txid]["chunksize"]
del raw_after[txid]["clusterid"]
assert_equal(raw_before[txid_a], raw_after[txid_a])
assert_equal(raw_before, raw_after)
assert_equal(self.nodes[0].getprioritisedtransactions(), {txid_b: {"fee_delta" : fee_delta_b*COIN, "in_mempool" : True, "modified_fee": int(fee_delta_b*COIN + COIN * tx_o_b["fee"])}, txid_c: {"fee_delta" : (fee_delta_c_1 + fee_delta_c_2)*COIN, "in_mempool" : True, "modified_fee": int((fee_delta_c_1 + fee_delta_c_2 ) * COIN + COIN * tx_o_c["fee"])}})
@ -128,6 +136,10 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
for t in [tx_o_a["hex"], tx_o_b["hex"], tx_o_c["hex"], tx_o_d["hex"]]:
self.nodes[0].sendrawtransaction(t)
raw_after = self.nodes[0].getrawmempool(verbose=True)
for txid in [txid_a, txid_b, txid_c, txid_d]:
del raw_after[txid]["fees"]["chunk"]
del raw_after[txid]["chunksize"]
del raw_after[txid]["clusterid"]
assert_equal(raw_before[txid_a], raw_after[txid_a])
assert_equal(raw_before, raw_after)
assert_equal(self.nodes[0].getprioritisedtransactions(), {txid_b: {"fee_delta" : fee_delta_b*COIN, "in_mempool" : True, "modified_fee": int(fee_delta_b*COIN + COIN * tx_o_b["fee"])}, txid_c: {"fee_delta" : (fee_delta_c_1 + fee_delta_c_2)*COIN, "in_mempool" : True, "modified_fee": int((fee_delta_c_1 + fee_delta_c_2 ) * COIN + COIN * tx_o_c["fee"])}})

View File

@ -267,7 +267,6 @@ BASE_SCRIPTS = [
'feature_utxo_set_hash.py',
'feature_rbf.py',
'mempool_packages.py',
'mempool_package_onemore.py',
'mempool_package_limits.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
@ -362,6 +361,7 @@ BASE_SCRIPTS = [
'p2p_sendtxrcncl.py',
'rpc_scantxoutset.py',
'feature_unsupported_utxo_db.py',
'mempool_cluster.py',
'feature_logging.py',
'feature_anchors.py',
'mempool_datacarrier.py',

View File

@ -56,9 +56,9 @@ class WalletTest(BitcoinTestFramework):
# whitelist peers to speed up tx relay / mempool sync
self.noban_tx_relay = True
self.extra_args = [
# Limit mempool descendants as a hack to have wallet txs rejected from the mempool.
# Limit mempool clusters as a hack to have wallet txs rejected from the mempool.
# Set walletrejectlongchains=0 so the wallet still creates the transactions.
['-limitdescendantcount=3', '-walletrejectlongchains=0'],
['-limitclustercount=3', '-walletrejectlongchains=0'],
[],
]

View File

@ -587,9 +587,9 @@ class WalletTest(BitcoinTestFramework):
self.log.info("Test -reindex")
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, ['-reindex', "-walletrejectlongchains=0", "-limitancestorcount=" + str(chainlimit)])
self.start_node(1, ['-reindex', "-limitancestorcount=" + str(chainlimit)])
self.start_node(2, ['-reindex', "-limitancestorcount=" + str(chainlimit)])
self.start_node(0, ['-reindex', "-walletrejectlongchains=0", "-limitancestorcount=" + str(chainlimit), "-limitclustercount=" + str(chainlimit)])
self.start_node(1, ['-reindex', "-limitclustercount=" + str(chainlimit)])
self.start_node(2, ['-reindex', "-limitclustercount=" + str(chainlimit)])
# reindex will leave rpc warm up "early"; Wait for it to finish
self.wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
@ -630,7 +630,7 @@ class WalletTest(BitcoinTestFramework):
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass AttemptSelection
self.stop_node(0)
extra_args = ["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)]
extra_args = ["-walletrejectlongchains", "-limitclustercount=" + str(2 * chainlimit), "-limitancestorcount=" + str(2*chainlimit)]
self.start_node(0, extra_args=extra_args)
# wait until the wallet has submitted all transactions to the mempool
@ -641,7 +641,7 @@ class WalletTest(BitcoinTestFramework):
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_rpc_error(-6, f"too many unconfirmed ancestors [limit: {chainlimit * 2}]", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
assert_raises_rpc_error(-6, f"too many unconfirmed transactions in the cluster [limit: {chainlimit * 2}]", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*", 99999)))

View File

@ -29,6 +29,7 @@ EXPECTED_BOOST_INCLUDES = ["boost/date_time/posix_time/posix_time.hpp",
"boost/multi_index/sequenced_index.hpp",
"boost/multi_index/tag.hpp",
"boost/multi_index_container.hpp",
"boost/multi_index/member.hpp",
"boost/operators.hpp",
"boost/signals2/connection.hpp",
"boost/signals2/optional_last_value.hpp",

View File

@ -57,6 +57,7 @@ unsigned-integer-overflow:CBlockPolicyEstimator::processBlockTx
unsigned-integer-overflow:TxConfirmStats::EstimateMedianVal
unsigned-integer-overflow:prevector.h
unsigned-integer-overflow:EvalScript
unsigned-integer-overflow:util/bitset.h
unsigned-integer-overflow:xoroshiro128plusplus.h
implicit-integer-sign-change:CBlockPolicyEstimator::processBlockTx
implicit-integer-sign-change:SetStdinEcho