// bitcoin/src/txorphanage.cpp

// Copyright (c) 2021-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <txorphanage.h>

#include <consensus/validation.h>
#include <logging.h>
#include <policy/policy.h>
#include <primitives/transaction.h>

#include <cassert>

/** Expiration time for orphan transactions in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
/** Minimum time between orphan transactions expire time checks in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
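
// Typical call pattern (illustrative sketch only; the real caller lives in
// net_processing and its exact control flow, limits, and error handling differ):
//
//   TxOrphanage orphanage;
//   FastRandomContext rng;
//   if (orphanage.AddTx(tx, peer)) {
//       orphanage.LimitOrphans(/*max_orphans=*/100, rng); // 100 is an example limit
//   }
//   // Later, once a missing parent has been accepted:
//   orphanage.AddChildrenToWorkSet(*parent);
//   while (CTransactionRef orphan = orphanage.GetTxToReconsider(peer)) {
//       // re-run validation on the now-connectable orphan
//   }

// Add a transaction whose parents are missing. Rejects duplicates (by txid) and
// oversized transactions, then indexes the orphan by txid, wtxid and each input's
// prevout so it can be reconsidered once a parent arrives.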
bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer)
{
    LOCK(m_mutex);

    const Txid& hash = tx->GetHash();
    const Wtxid& wtxid = tx->GetWitnessHash();
    if (m_orphans.count(hash))
        return false;

    // Ignore big transactions, to avoid a send-big-orphans memory exhaustion
    // attack. If a peer has a legitimate large transaction with a missing
    // parent then we assume it will rebroadcast it later, after the parent
    // transaction(s) have been mined or received.
    // With at most 100 orphans, each limited to MAX_STANDARD_TX_WEIGHT below,
    // the stored transactions are bounded to tens of megabytes in the worst
    // case, plus somewhat more for the by-prevout index:
    unsigned int sz = GetTransactionWeight(*tx);
    if (sz > MAX_STANDARD_TX_WEIGHT)
    {
        LogPrint(BCLog::TXPACKAGES, "ignoring large orphan tx (weight: %u, txid: %s, wtxid: %s)\n", sz, hash.ToString(), wtxid.ToString());
        return false;
    }

    auto ret = m_orphans.emplace(hash, OrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size()});
    assert(ret.second);
    m_orphan_list.push_back(ret.first);
    // Allow for lookups in the orphan pool by wtxid, as well as txid
    m_wtxid_to_orphan_it.emplace(tx->GetWitnessHash(), ret.first);
    for (const CTxIn& txin : tx->vin) {
        m_outpoint_to_orphan_it[txin.prevout].insert(ret.first);
    }

    LogPrint(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s) (mapsz %u outsz %u)\n", hash.ToString(), wtxid.ToString(),
             m_orphans.size(), m_outpoint_to_orphan_it.size());
    return true;
}
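
// Erase an orphan by txid, taking the lock. Returns the number of entries removed (0 or 1).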
int TxOrphanage::EraseTx(const Txid& txid)
{
    LOCK(m_mutex);
    return EraseTxNoLock(txid);
}
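
// Erase an orphan with m_mutex already held: unlink it from the by-prevout index,
// compact m_orphan_list by moving the last element into the freed slot, and drop
// the wtxid and txid entries. Returns 0 if the txid was not found, 1 otherwise.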
int TxOrphanage::EraseTxNoLock(const Txid& txid)
{
    AssertLockHeld(m_mutex);
    std::map<Txid, OrphanTx>::iterator it = m_orphans.find(txid);
    if (it == m_orphans.end())
        return 0;
    for (const CTxIn& txin : it->second.tx->vin)
    {
        auto itPrev = m_outpoint_to_orphan_it.find(txin.prevout);
        if (itPrev == m_outpoint_to_orphan_it.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            m_outpoint_to_orphan_it.erase(itPrev);
    }

    size_t old_pos = it->second.list_pos;
    assert(m_orphan_list[old_pos] == it);
    if (old_pos + 1 != m_orphan_list.size()) {
        // Unless we're deleting the last entry in m_orphan_list, move the last
        // entry to the position we're deleting.
        auto it_last = m_orphan_list.back();
        m_orphan_list[old_pos] = it_last;
        it_last->second.list_pos = old_pos;
    }

    const auto& wtxid = it->second.tx->GetWitnessHash();
    LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", txid.ToString(), wtxid.ToString());
    m_orphan_list.pop_back();
    m_wtxid_to_orphan_it.erase(wtxid);

    m_orphans.erase(it);
    return 1;
}
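
// Erase all orphans announced by a disconnected peer and drop its work set.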
void TxOrphanage::EraseForPeer(NodeId peer)
{
    LOCK(m_mutex);

    m_peer_work_set.erase(peer);

    int nErased = 0;
    std::map<Txid, OrphanTx>::iterator iter = m_orphans.begin();
    while (iter != m_orphans.end())
    {
        std::map<Txid, OrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            nErased += EraseTxNoLock(maybeErase->second.tx->GetHash());
        }
    }
    if (nErased > 0) LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx from peer=%d\n", nErased, peer);
}
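
// Enforce the orphan pool size limit: periodically sweep out entries whose
// expiration time has passed, then evict random orphans until at most
// max_orphans remain.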
void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng)
{
    LOCK(m_mutex);

    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
    if (nNextSweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
        std::map<Txid, OrphanTx>::iterator iter = m_orphans.begin();
        while (iter != m_orphans.end())
        {
            std::map<Txid, OrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseTxNoLock(maybeErase->second.tx->GetHash());
            } else {
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx due to expiration\n", nErased);
    }
    while (m_orphans.size() > max_orphans)
    {
        // Evict a random orphan:
        size_t randompos = rng.randrange(m_orphan_list.size());
        EraseTxNoLock(m_orphan_list[randompos]->first);
        ++nEvicted;
    }
    if (nEvicted > 0) LogPrint(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx\n", nEvicted);
}
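
// When a transaction is accepted, queue any stored orphans that spend its outputs
// into the work sets of the peers that originally provided them, so those orphans
// can be revalidated.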
void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx)
{
    LOCK(m_mutex);

    for (unsigned int i = 0; i < tx.vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& elem : it_by_prev->second) {
                // Get this source peer's work set, emplacing an empty set if it didn't exist
                // (note: if this peer wasn't still connected, we would have removed the orphan tx already)
                std::set<Txid>& orphan_work_set = m_peer_work_set.try_emplace(elem->second.fromPeer).first->second;
                // Add this tx to the work set
                orphan_work_set.insert(elem->first);
                LogPrint(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n",
                         tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), elem->second.fromPeer);
            }
        }
    }
}
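
// Check whether an orphan with the given txid or wtxid is currently stored.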
bool TxOrphanage::HaveTx(const GenTxid& gtxid) const
{
    LOCK(m_mutex);
    if (gtxid.IsWtxid()) {
        return m_wtxid_to_orphan_it.count(Wtxid::FromUint256(gtxid.GetHash()));
    } else {
        return m_orphans.count(Txid::FromUint256(gtxid.GetHash()));
    }
}
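
// Pop entries from the peer's work set until one that is still present in the
// orphanage is found; return that transaction, or nullptr if the work set is empty.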
CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer)
{
    LOCK(m_mutex);

    auto work_set_it = m_peer_work_set.find(peer);
    if (work_set_it != m_peer_work_set.end()) {
        auto& work_set = work_set_it->second;
        while (!work_set.empty()) {
            Txid txid = *work_set.begin();
            work_set.erase(work_set.begin());

            const auto orphan_it = m_orphans.find(txid);
            if (orphan_it != m_orphans.end()) {
                return orphan_it->second.tx;
            }
        }
    }
    return nullptr;
}
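
// Return true if the peer has a non-empty work set of orphans awaiting reconsideration.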
bool TxOrphanage::HaveTxToReconsider(NodeId peer)
{
    LOCK(m_mutex);

    auto work_set_it = m_peer_work_set.find(peer);
    if (work_set_it != m_peer_work_set.end()) {
        auto& work_set = work_set_it->second;
        return !work_set.empty();
    }
    return false;
}
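
// Erase every orphan that spends an outpoint also spent by a transaction in the
// connected block, i.e. orphans that were included in the block or conflict with it.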
void TxOrphanage::EraseForBlock(const CBlock& block)
{
    LOCK(m_mutex);

    std::vector<Txid> vOrphanErase;

    for (const CTransactionRef& ptx : block.vtx) {
        const CTransaction& tx = *ptx;

        // Which orphan pool entries must we evict?
        for (const auto& txin : tx.vin) {
            auto itByPrev = m_outpoint_to_orphan_it.find(txin.prevout);
            if (itByPrev == m_outpoint_to_orphan_it.end()) continue;
            for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
                const CTransaction& orphanTx = *(*mi)->second.tx;
                const auto& orphanHash = orphanTx.GetHash();
                vOrphanErase.push_back(orphanHash);
            }
        }
    }

    // Erase orphan transactions included or precluded by this block
    if (vOrphanErase.size()) {
        int nErased = 0;
        for (const auto& orphanHash : vOrphanErase) {
            nErased += EraseTxNoLock(orphanHash);
        }
        LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx included or conflicted by block\n", nErased);
    }
}
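
// Return all stored orphans that spend an output of `parent` and were provided by
// `nodeid`, deduplicated and ordered from latest expiry (most recently added) to oldest.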
std::vector<CTransactionRef> TxOrphanage::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const
{
    LOCK(m_mutex);

    // First construct a vector of iterators to ensure we do not return duplicates of the same tx
    // and so we can sort by nTimeExpire.
    std::vector<OrphanMap::iterator> iters;

    // For each output, get all entries spending this prevout, filtering for ones from the specified peer.
    for (unsigned int i = 0; i < parent->vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(parent->GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& elem : it_by_prev->second) {
                if (elem->second.fromPeer == nodeid) {
                    iters.emplace_back(elem);
                }
            }
        }
    }

    // Sort by address so that duplicates can be deleted. At the same time, sort so that more recent
    // orphans (which expire later) come first. Break ties based on address, as nTimeExpire is
    // quantized to seconds and it is possible for orphans to have the same expiry.
    std::sort(iters.begin(), iters.end(), [](const auto& lhs, const auto& rhs) {
        if (lhs->second.nTimeExpire == rhs->second.nTimeExpire) {
            return &(*lhs) < &(*rhs);
        } else {
            return lhs->second.nTimeExpire > rhs->second.nTimeExpire;
        }
    });
    // Erase duplicates
    iters.erase(std::unique(iters.begin(), iters.end()), iters.end());

    // Convert to a vector of CTransactionRef
    std::vector<CTransactionRef> children_found;
    children_found.reserve(iters.size());
    for (const auto& child_iter : iters) {
        children_found.emplace_back(child_iter->second.tx);
    }
    return children_found;
}
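
// Return all stored orphans that spend an output of `parent` and were provided by a
// peer other than `nodeid`, each paired with the peer that provided it (duplicates removed).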
std::vector<std::pair<CTransactionRef, NodeId>> TxOrphanage::GetChildrenFromDifferentPeer(const CTransactionRef& parent, NodeId nodeid) const
{
    LOCK(m_mutex);

    // First construct vector of iterators to ensure we do not return duplicates of the same tx.
    std::vector<OrphanMap::iterator> iters;

    // For each output, get all entries spending this prevout, filtering for ones not from the specified peer.
    for (unsigned int i = 0; i < parent->vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(parent->GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& elem : it_by_prev->second) {
                if (elem->second.fromPeer != nodeid) {
                    iters.emplace_back(elem);
                }
            }
        }
    }

    // Erase duplicates
    std::sort(iters.begin(), iters.end(), IteratorComparator());
    iters.erase(std::unique(iters.begin(), iters.end()), iters.end());

    // Convert iterators to pair<CTransactionRef, NodeId>
    std::vector<std::pair<CTransactionRef, NodeId>> children_found;
    children_found.reserve(iters.size());
    for (const auto& child_iter : iters) {
        children_found.emplace_back(child_iter->second.tx, child_iter->second.fromPeer);
    }
    return children_found;
}