[refactor] make TxOrphanage keep itself trimmed

glozow 2025-07-03 16:06:21 -04:00
parent d1fac25ff3
commit af7402ccfa
6 changed files with 125 additions and 166 deletions
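Summary: TxOrphanageImpl now enforces its own limits. The mutating operations (AddTx, AddAnnouncer, EraseTx, EraseForPeer, EraseForBlock) end by calling a now-private LimitOrphans(), and the virtual LimitOrphans() is dropped from the public TxOrphanage interface, so callers no longer trim explicitly. A minimal before/after sketch of a call site (illustrative only, not literal code from this diff):

    // Before: the caller had to remember to trim after every mutation.
    m_orphanage->AddTx(tx, peer);
    m_orphanage->LimitOrphans(); // DoS prevention (see CVE-2012-3789)

    // After: AddTx trims internally, so a plain call suffices.
    m_orphanage->AddTx(tx, peer); // may evict announcements to stay within limits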


@ -97,7 +97,6 @@ static void OrphanageSinglePeerEviction(benchmark::Bench& bench)
// Lastly, add the large transaction.
const auto num_announcements_before_trim{orphanage->CountAnnouncements()};
assert(orphanage->AddTx(large_tx, peer));
orphanage->LimitOrphans();
// If there are multiple peers, note that they all have the same DoS score. We will evict only 1 item at a time for each new DoSiest peer.
const auto num_announcements_after_trim{orphanage->CountAnnouncements()};
@ -178,7 +177,6 @@ static void OrphanageMultiPeerEviction(benchmark::Bench& bench)
const auto num_announcements_before_trim{orphanage->CountAnnouncements()};
// There is a small gap between the total usage and the max usage. Add a transaction to fill it.
assert(orphanage->AddTx(last_tx, 0));
orphanage->LimitOrphans();
// If there are multiple peers, note that they all have the same DoS score. We will evict only 1 item at a time for each new DoSiest peer.
const auto num_evicted{num_announcements_before_trim - orphanage->CountAnnouncements() + 1};
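The "+ 1" compensates for the announcement added between the two counts: illustratively, if CountAnnouncements() returned 3000 before AddTx and 2995 after the internal trim, then 3000 - 2995 + 1 = 6 announcements were evicted.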


@ -188,7 +188,6 @@ bool TxDownloadManagerImpl::AddTxAnnouncement(NodeId peer, const GenTxid& gtxid,
if (MaybeAddOrphanResolutionCandidate(unique_parents, *wtxid, peer, now)) {
m_orphanage->AddAnnouncer(orphan_tx->GetWitnessHash(), peer);
m_orphanage->LimitOrphans();
}
// Return even if the peer isn't an orphan resolution candidate. This would be caught by AlreadyHaveTx.
@ -419,9 +418,6 @@ node::RejectedTxTodo TxDownloadManagerImpl::MempoolRejectedTx(const CTransaction
// Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
m_txrequest.ForgetTxHash(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
// DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
m_orphanage->LimitOrphans();
} else {
unique_parents.clear();
LogDebug(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s (wtxid=%s)\n",


@ -172,11 +172,18 @@ class TxOrphanageImpl final : public TxOrphanage {
template<typename Tag>
void Erase(Iter<Tag> it);
/** Erase by wtxid. */
bool EraseTxInternal(const Wtxid& wtxid);
/** Check if there is exactly one announcement with the same wtxid as it. */
bool IsUnique(Iter<ByWtxid> it) const;
/** Check if the orphanage needs trimming. */
bool NeedsTrim() const;
/** Limit the orphanage to MaxGlobalLatencyScore and MaxGlobalUsage. */
void LimitOrphans();
public:
TxOrphanageImpl() = default;
TxOrphanageImpl(Count max_global_ann, Usage reserved_peer_usage) :
@ -216,7 +223,6 @@ public:
bool EraseTx(const Wtxid& wtxid) override;
void EraseForPeer(NodeId peer) override;
void EraseForBlock(const CBlock& block) override;
void LimitOrphans() override;
std::vector<std::pair<Wtxid, NodeId>> AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng) override;
bool HaveTxToReconsider(NodeId peer) override;
std::vector<CTransactionRef> GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const override;
@ -332,6 +338,9 @@ bool TxOrphanageImpl::AddTx(const CTransactionRef& tx, NodeId peer)
peer, txid.ToString(), wtxid.ToString());
Assume(!IsUnique(iter));
}
// DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
LimitOrphans();
return brand_new;
}
@ -360,10 +369,13 @@ bool TxOrphanageImpl::AddAnnouncer(const Wtxid& wtxid, NodeId peer)
peer, txid.ToString(), wtxid.ToString());
Assume(!IsUnique(iter));
// DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
LimitOrphans();
return true;
}
bool TxOrphanageImpl::EraseTx(const Wtxid& wtxid)
bool TxOrphanageImpl::EraseTxInternal(const Wtxid& wtxid)
{
auto& index_by_wtxid = m_orphans.get<ByWtxid>();
@ -378,12 +390,21 @@ bool TxOrphanageImpl::EraseTx(const Wtxid& wtxid)
Erase<ByWtxid>(it++);
num_ann += 1;
}
LogDebug(BCLog::TXPACKAGES, "removed orphan tx %s (wtxid=%s) (%u announcements)\n", txid.ToString(), wtxid.ToString(), num_ann);
return true;
}
bool TxOrphanageImpl::EraseTx(const Wtxid& wtxid)
{
const auto ret = EraseTxInternal(wtxid);
// Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here.
LimitOrphans();
return ret;
}
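The trim after a deletion is needed because the usage cap scales with how many peers currently have announcements, so dropping a peer's last announcement can pull the cap below current usage. The fuzz harness below recomputes the same bound as reserved_peer_usage * num_peers; a sketch of the assumed accessor (member names are illustrative and its body is not part of this diff):

    // Assumed semantics, for illustration only.
    TxOrphanage::Usage TxOrphanageImpl::MaxGlobalUsage() const
    {
        // Reserved weight per peer, times the number of peers with at least one announcement.
        return m_reserved_usage_per_peer * std::max<int64_t>(1, m_peer_orphanage_info.size());
    }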
/** Erase all entries by this peer. */
void TxOrphanageImpl::EraseForPeer(NodeId peer)
{
@ -400,6 +421,9 @@ void TxOrphanageImpl::EraseForPeer(NodeId peer)
Assume(!m_peer_orphanage_info.contains(peer));
if (num_ann > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", num_ann, peer);
// Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here.
LimitOrphans();
}
/** If the data structure needs trimming, evicts announcements by selecting the DoSiest peer and evicting its oldest
@ -565,6 +589,7 @@ bool TxOrphanageImpl::HaveTxToReconsider(NodeId peer)
auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0});
return it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider;
}
void TxOrphanageImpl::EraseForBlock(const CBlock& block)
{
std::set<Wtxid> wtxids_to_erase;
@ -583,13 +608,19 @@ void TxOrphanageImpl::EraseForBlock(const CBlock& block)
unsigned int num_erased{0};
for (const auto& wtxid : wtxids_to_erase) {
num_erased += EraseTx(wtxid) ? 1 : 0;
// Don't use EraseTx here because it calls LimitOrphans and announcements deleted in that call are not reflected
// in its return result. Waiting until the end to do LimitOrphans helps save repeated computation and allows us
// to check that num_erased is what we expect.
num_erased += EraseTxInternal(wtxid) ? 1 : 0;
}
if (num_erased != 0) {
LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", num_erased);
}
Assume(wtxids_to_erase.size() == num_erased);
// Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here.
LimitOrphans();
}
/** Get all children that spend from this tx and were received from nodeid. Sorted from most
@ -697,6 +728,8 @@ void TxOrphanageImpl::SanityCheck() const
const auto summed_peer_latency_score = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.m_total_latency_score; });
assert(summed_peer_latency_score >= m_unique_rounded_input_scores + m_orphans.size());
assert(!NeedsTrim());
}
TxOrphanage::Count TxOrphanageImpl::MaxGlobalLatencyScore() const { return m_max_global_latency_score; }
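The new assert(!NeedsTrim()) in SanityCheck() captures the invariant this commit establishes: outside of a mutating call, the orphanage always satisfies both limits. The body of NeedsTrim() is not shown in this diff; a plausible sketch, consistent with the checks used in the fuzz targets and unit tests:

    // Sketch only; assumed to mirror the limits that LimitOrphans() enforces.
    bool TxOrphanageImpl::NeedsTrim() const
    {
        return TotalLatencyScore() > MaxGlobalLatencyScore()
            || TotalOrphanUsage() > MaxGlobalUsage();
    }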


@ -88,9 +88,6 @@ public:
/** Erase all orphans included in or invalidated by a new block */
virtual void EraseForBlock(const CBlock& block) = 0;
/** Limit the orphanage to MaxGlobalLatencyScore and MaxGlobalUsage. */
virtual void LimitOrphans() = 0;
/** Add any orphans that list a particular tx as a parent into the from peer's work set */
virtual std::vector<std::pair<Wtxid, NodeId>> AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng) = 0;


@ -122,37 +122,34 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
},
[&] {
bool have_tx = orphanage->HaveTx(tx->GetWitnessHash());
bool have_tx_and_peer = orphanage->HaveTxFromPeer(wtxid, peer_id);
// AddTx should return false if tx is too big or already have it
// tx weight is unknown, we only check when tx is already in orphanage
{
bool add_tx = orphanage->AddTx(tx, peer_id);
// have_tx == true -> add_tx == false
Assert(!have_tx || !add_tx);
// have_tx_and_peer == true -> add_tx == false
Assert(!have_tx_and_peer || !add_tx);
// After AddTx, the orphanage may trim itself, so the peer's usage may have gone up or down.
if (add_tx) {
Assert(orphanage->UsageByPeer(peer_id) == tx_weight + total_peer_bytes_start);
Assert(orphanage->TotalOrphanUsage() == tx_weight + total_bytes_start);
Assert(tx_weight <= MAX_STANDARD_TX_WEIGHT);
} else {
// Peer may have been added as an announcer.
if (orphanage->UsageByPeer(peer_id) == tx_weight + total_peer_bytes_start) {
if (orphanage->UsageByPeer(peer_id) > total_peer_bytes_start) {
Assert(orphanage->HaveTxFromPeer(wtxid, peer_id));
} else {
// Otherwise, there must not be any change to the peer byte count.
Assert(orphanage->UsageByPeer(peer_id) == total_peer_bytes_start);
}
// Regardless, total bytes should not have changed.
Assert(orphanage->TotalOrphanUsage() == total_bytes_start);
// If announcement was added, total bytes does not increase.
// However, if eviction was triggered, the value may decrease.
Assert(orphanage->TotalOrphanUsage() <= total_bytes_start);
}
}
have_tx = orphanage->HaveTx(tx->GetWitnessHash());
{
bool add_tx = orphanage->AddTx(tx, peer_id);
// if have_tx is still false, it must be too big
Assert(!have_tx == (tx_weight > MAX_STANDARD_TX_WEIGHT));
Assert(!have_tx || !add_tx);
}
// We are not guaranteed to have_tx after AddTx. There are a few possible reasons:
// - tx itself exceeds the per-peer memory usage limit, so LimitOrphans had to remove it immediately
// - tx itself exceeds the per-peer latency score limit, so LimitOrphans had to remove it immediately
// - the orphanage needed trimming and all other announcements from this peer are reconsiderable
},
[&] {
bool have_tx = orphanage->HaveTx(tx->GetWitnessHash());
@ -165,14 +162,9 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
// have_tx_and_peer == true -> added_announcer == false
Assert(!have_tx_and_peer || !added_announcer);
// Total bytes should not have changed. If peer was added as announcer, byte
// accounting must have been updated.
Assert(orphanage->TotalOrphanUsage() == total_bytes_start);
if (added_announcer) {
Assert(orphanage->UsageByPeer(peer_id) == tx_weight + total_peer_bytes_start);
} else {
Assert(orphanage->UsageByPeer(peer_id) == total_peer_bytes_start);
}
// If announcement was added, total bytes does not increase.
// However, if eviction was triggered, the value may decrease.
Assert(orphanage->TotalOrphanUsage() <= total_bytes_start);
}
},
[&] {
@ -182,15 +174,11 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
{
auto bytes_from_peer_before{orphanage->UsageByPeer(peer_id)};
Assert(have_tx == orphanage->EraseTx(tx->GetWitnessHash()));
// After EraseTx, the orphanage may trim itself, so any peer's usage may have decreased further.
if (have_tx) {
Assert(orphanage->TotalOrphanUsage() == total_bytes_start - tx_weight);
if (have_tx_and_peer) {
Assert(orphanage->UsageByPeer(peer_id) == bytes_from_peer_before - tx_weight);
} else {
if (!have_tx_and_peer) {
Assert(orphanage->UsageByPeer(peer_id) == bytes_from_peer_before);
}
} else {
Assert(orphanage->TotalOrphanUsage() == total_bytes_start);
}
}
have_tx = orphanage->HaveTx(tx->GetWitnessHash());
@ -218,13 +206,8 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage)
Assert(!orphanage->HaveTx(tx_removed->GetWitnessHash()));
Assert(!orphanage->HaveTxFromPeer(tx_removed->GetWitnessHash(), peer_id));
}
},
[&] {
// test mocktime and expiry
SetMockTime(ConsumeTime(fuzzed_data_provider));
orphanage->LimitOrphans();
});
}
);
}
// Set tx as potential parent to be used for future GetChildren() calls.
@ -369,33 +352,8 @@ FUZZ_TARGET(txorphan_protected, .init = initialize_orphanage)
Assert(orphanage->LatencyScoreFromPeer(peer_id) == 0);
Assert(orphanage->AnnouncementsFromPeer(peer_id) == 0);
}
},
[&] { // LimitOrphans
// Assert that protected peers are never affected by LimitOrphans.
unsigned int protected_count = 0;
unsigned int protected_bytes = 0;
for (unsigned int peer = 0; peer < num_peers; ++peer) {
if (protected_peers[peer]) {
protected_count += orphanage->LatencyScoreFromPeer(peer);
protected_bytes += orphanage->UsageByPeer(peer);
}
}
orphanage->LimitOrphans();
Assert(orphanage->TotalLatencyScore() <= global_latency_score_limit);
Assert(orphanage->TotalOrphanUsage() <= per_peer_weight_reservation * num_peers);
// Number of announcements and usage should never differ before and after since
// we've never exceeded the per-peer reservations.
for (unsigned int peer = 0; peer < num_peers; ++peer) {
if (protected_peers[peer]) {
protected_count -= orphanage->LatencyScoreFromPeer(peer);
protected_bytes -= orphanage->UsageByPeer(peer);
}
}
Assert(protected_count == 0);
Assert(protected_bytes == 0);
});
}
);
}
}
@ -727,59 +685,56 @@ FUZZ_TARGET(txorphanage_sim)
assert(!have_reconsider_fn(peer));
}
break;
} else if (command-- == 0) {
// LimitOrphans
const auto max_ann = max_global_ann / std::max<unsigned>(1, count_peers_fn());
const auto max_mem = reserved_peer_usage;
while (true) {
// Count global usage and number of peers.
node::TxOrphanage::Usage total_usage{0};
node::TxOrphanage::Count total_latency_score = sim_announcements.size();
for (unsigned tx = 0; tx < NUM_TX; ++tx) {
if (have_tx_fn(tx)) {
total_usage += GetTransactionWeight(*txn[tx]);
total_latency_score += txn[tx]->vin.size() / 10;
}
}
auto num_peers = count_peers_fn();
bool oversized = (total_usage > reserved_peer_usage * num_peers) ||
(total_latency_score > real->MaxGlobalLatencyScore());
if (!oversized) break;
// Find worst peer.
FeeFrac worst_dos_score{0, 1};
unsigned worst_peer = unsigned(-1);
for (unsigned peer = 0; peer < NUM_PEERS; ++peer) {
auto dos_score = dos_score_fn(peer, max_ann, max_mem);
// Use >= so that the more recent peer (higher NodeId) wins in case of
// ties.
if (dos_score >= worst_dos_score) {
worst_dos_score = dos_score;
worst_peer = peer;
}
}
assert(worst_peer != unsigned(-1));
assert(worst_dos_score >> FeeFrac(1, 1));
// Find oldest announcement from worst_peer, preferring non-reconsiderable ones.
bool done{false};
for (int reconsider = 0; reconsider < 2; ++reconsider) {
for (auto it = sim_announcements.begin(); it != sim_announcements.end(); ++it) {
if (it->announcer != worst_peer || it->reconsider != reconsider) continue;
sim_announcements.erase(it);
done = true;
break;
}
if (done) break;
}
assert(done);
}
real->LimitOrphans();
// We must now be within limits; otherwise, LimitOrphans should have continued further.
// We don't check the contents of the orphanage until the end to make fuzz runs faster.
assert(real->TotalLatencyScore() <= real->MaxGlobalLatencyScore());
assert(real->TotalOrphanUsage() <= real->MaxGlobalUsage());
break;
}
}
// Always trim after each command if needed.
const auto max_ann = max_global_ann / std::max<unsigned>(1, count_peers_fn());
const auto max_mem = reserved_peer_usage;
while (true) {
// Count global usage and number of peers.
node::TxOrphanage::Usage total_usage{0};
node::TxOrphanage::Count total_latency_score = sim_announcements.size();
for (unsigned tx = 0; tx < NUM_TX; ++tx) {
if (have_tx_fn(tx)) {
total_usage += GetTransactionWeight(*txn[tx]);
total_latency_score += txn[tx]->vin.size() / 10;
}
}
auto num_peers = count_peers_fn();
bool oversized = (total_usage > reserved_peer_usage * num_peers) ||
(total_latency_score > real->MaxGlobalLatencyScore());
if (!oversized) break;
// Find worst peer.
FeeFrac worst_dos_score{0, 1};
unsigned worst_peer = unsigned(-1);
for (unsigned peer = 0; peer < NUM_PEERS; ++peer) {
auto dos_score = dos_score_fn(peer, max_ann, max_mem);
// Use >= so that the more recent peer (higher NodeId) wins in case of
// ties.
if (dos_score >= worst_dos_score) {
worst_dos_score = dos_score;
worst_peer = peer;
}
}
assert(worst_peer != unsigned(-1));
assert(worst_dos_score >> FeeFrac(1, 1));
// Find oldest announcement from worst_peer, preferring non-reconsiderable ones.
bool done{false};
for (int reconsider = 0; reconsider < 2; ++reconsider) {
for (auto it = sim_announcements.begin(); it != sim_announcements.end(); ++it) {
if (it->announcer != worst_peer || it->reconsider != reconsider) continue;
sim_announcements.erase(it);
done = true;
break;
}
if (done) break;
}
assert(done);
}
// We must now be within limits; otherwise, LimitOrphans should have continued further.
// We don't check the contents of the orphanage until the end to make fuzz runs faster.
assert(real->TotalLatencyScore() <= real->MaxGlobalLatencyScore());
assert(real->TotalOrphanUsage() <= real->MaxGlobalUsage());
}
//


@ -72,15 +72,6 @@ static bool EqualTxns(const std::set<CTransactionRef>& set_txns, const std::vect
return true;
}
unsigned int CheckNumEvictions(node::TxOrphanage& orphanage)
{
const auto original_total_count{orphanage.CountAnnouncements()};
orphanage.LimitOrphans();
assert(orphanage.TotalLatencyScore() <= orphanage.MaxGlobalLatencyScore());
assert(orphanage.TotalOrphanUsage() <= orphanage.MaxGlobalUsage());
return original_total_count - orphanage.CountAnnouncements();
}
BOOST_AUTO_TEST_CASE(peer_dos_limits)
{
FastRandomContext det_rand{true};
@ -106,30 +97,22 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
auto orphanage_low_ann = node::MakeTxOrphanage(/*max_global_ann=*/1, /*reserved_peer_usage=*/TX_SIZE * 10);
auto orphanage_low_mem = node::MakeTxOrphanage(/*max_global_ann=*/10, /*reserved_peer_usage=*/TX_SIZE);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage_low_mem), 0);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage_low_ann), 0);
// Add the first transaction
orphanage_low_ann->AddTx(txns.at(0), peer);
orphanage_low_mem->AddTx(txns.at(0), peer);
// Add more. One of the limits is exceeded, so LimitOrphans evicts 1.
orphanage_low_ann->AddTx(txns.at(1), peer);
BOOST_CHECK(orphanage_low_ann->TotalLatencyScore() > orphanage_low_ann->MaxGlobalLatencyScore());
BOOST_CHECK(orphanage_low_ann->TotalOrphanUsage() <= orphanage_low_ann->MaxGlobalUsage());
orphanage_low_mem->AddTx(txns.at(1), peer);
BOOST_CHECK(orphanage_low_mem->TotalLatencyScore() <= orphanage_low_mem->MaxGlobalLatencyScore());
BOOST_CHECK(orphanage_low_mem->TotalOrphanUsage() > orphanage_low_mem->MaxGlobalUsage());
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage_low_mem), 1);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage_low_ann), 1);
// The older transaction is evicted.
BOOST_CHECK(!orphanage_low_ann->HaveTx(txns.at(0)->GetWitnessHash()));
BOOST_CHECK(!orphanage_low_mem->HaveTx(txns.at(0)->GetWitnessHash()));
BOOST_CHECK(orphanage_low_ann->HaveTx(txns.at(1)->GetWitnessHash()));
BOOST_CHECK(orphanage_low_mem->HaveTx(txns.at(1)->GetWitnessHash()));
orphanage_low_ann->SanityCheck();
orphanage_low_mem->SanityCheck();
}
// Single peer: latency score includes inputs
@ -138,8 +121,6 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
NodeId peer{10};
auto orphanage_low_ann = node::MakeTxOrphanage(/*max_global_ann=*/5, /*reserved_peer_usage=*/TX_SIZE * 1000);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage_low_ann), 0);
// Add the first transaction
orphanage_low_ann->AddTx(txns.at(0), peer);
@ -151,14 +132,11 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
auto ptx = MakeTransactionSpending(outpoints_45, det_rand);
orphanage_low_ann->AddTx(ptx, peer);
BOOST_CHECK(orphanage_low_ann->TotalLatencyScore() > orphanage_low_ann->MaxGlobalLatencyScore());
BOOST_CHECK(orphanage_low_ann->LatencyScoreFromPeer(peer) > orphanage_low_ann->MaxPeerLatencyScore());
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage_low_ann), 1);
// The older transaction is evicted.
BOOST_CHECK(!orphanage_low_ann->HaveTx(txns.at(0)->GetWitnessHash()));
BOOST_CHECK(orphanage_low_ann->HaveTx(ptx->GetWitnessHash()));
orphanage_low_ann->SanityCheck();
}
// Single peer: eviction order is FIFO on non-reconsiderable, then reconsiderable orphans.
@ -191,15 +169,14 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
// Add 1 more orphan, causing the orphanage to be oversize. child1 is evicted.
orphanage->AddTx(children.at(3), peer);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 1);
BOOST_CHECK(orphanage->HaveTx(children.at(0)->GetWitnessHash()));
BOOST_CHECK(!orphanage->HaveTx(children.at(1)->GetWitnessHash()));
BOOST_CHECK(orphanage->HaveTx(children.at(2)->GetWitnessHash()));
BOOST_CHECK(orphanage->HaveTx(children.at(3)->GetWitnessHash()));
orphanage->SanityCheck();
// Add 1 more... child2 is evicted.
orphanage->AddTx(children.at(4), peer);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 1);
BOOST_CHECK(orphanage->HaveTx(children.at(0)->GetWitnessHash()));
BOOST_CHECK(!orphanage->HaveTx(children.at(2)->GetWitnessHash()));
BOOST_CHECK(orphanage->HaveTx(children.at(3)->GetWitnessHash()));
@ -213,7 +190,6 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
// child5 is evicted immediately because it is the only non-reconsiderable orphan.
orphanage->AddTx(children.at(5), peer);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 1);
BOOST_CHECK(orphanage->HaveTx(children.at(0)->GetWitnessHash()));
BOOST_CHECK(orphanage->HaveTx(children.at(3)->GetWitnessHash()));
BOOST_CHECK(orphanage->HaveTx(children.at(4)->GetWitnessHash()));
@ -222,7 +198,6 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
// Transactions are marked non-reconsiderable again when returned through GetTxToReconsider
BOOST_CHECK_EQUAL(orphanage->GetTxToReconsider(peer), children.at(0));
orphanage->AddTx(children.at(6), peer);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 1);
BOOST_CHECK(!orphanage->HaveTx(children.at(0)->GetWitnessHash()));
BOOST_CHECK(orphanage->HaveTx(children.at(3)->GetWitnessHash()));
BOOST_CHECK(orphanage->HaveTx(children.at(4)->GetWitnessHash()));
@ -232,6 +207,8 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
// reconsideration earlier.
BOOST_CHECK_EQUAL(orphanage->GetTxToReconsider(peer), children.at(3));
BOOST_CHECK_EQUAL(orphanage->GetTxToReconsider(peer), children.at(4));
orphanage->SanityCheck();
}
// Multiple peers: when limit is exceeded, we choose the DoSiest peer and evict their oldest transaction.
@ -247,8 +224,8 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
// No evictions happen before the global limit is reached.
for (unsigned int i{0}; i < max_announcements; ++i) {
orphanage->AddTx(txns.at(i), peer_dosy);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 0);
}
orphanage->SanityCheck();
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer_dosy), max_announcements);
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer1), 0);
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer2), 0);
@ -260,7 +237,6 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
orphanage->AddTx(txns.at(max_announcements + i), peer1);
// The announcement limit per peer has halved, but LimitOrphans does not evict beyond what is necessary to
// bring the total announcements within its global limit.
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 1);
BOOST_CHECK(orphanage->AnnouncementsFromPeer(peer_dosy) > orphanage->MaxPeerLatencyScore());
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer1), i + 1);
@ -276,9 +252,6 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
BOOST_CHECK(orphanage->HaveTxFromPeer(txns.at(i)->GetWitnessHash(), peer_dosy));
orphanage->AddTx(txns.at(i), peer2);
// Announcement limit is by entry, not by unique orphans
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 1);
// peer_dosy is still the only one getting evicted
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer_dosy), max_announcements - i - 1);
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer1), num_from_peer1);
@ -291,15 +264,18 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
// With 6 peers, each can add 10, and still only peer_dosy's orphans are evicted.
const unsigned int max_per_peer{max_announcements / 6};
const unsigned int num_announcements{orphanage->CountAnnouncements()};
for (NodeId peer{3}; peer < 6; ++peer) {
for (unsigned int i{0}; i < max_per_peer; ++i) {
// Each addition causes 1 eviction.
orphanage->AddTx(txns.at(peer * max_per_peer + i), peer);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 1);
BOOST_CHECK_EQUAL(orphanage->CountAnnouncements(), num_announcements);
}
}
for (NodeId peer{0}; peer < 6; ++peer) {
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer), max_per_peer);
}
orphanage->SanityCheck();
}
// Limits change as more peers are added.
@ -355,6 +331,8 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
BOOST_CHECK_EQUAL(orphanage->ReservedPeerUsage(), node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER);
BOOST_CHECK_EQUAL(orphanage->MaxGlobalUsage(), node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER);
BOOST_CHECK_EQUAL(orphanage->MaxPeerLatencyScore(), node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE);
orphanage->SanityCheck();
}
// Test eviction of multiple transactions at a time
@ -378,17 +356,17 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
}
BOOST_CHECK(orphanage->TotalLatencyScore() <= orphanage->MaxGlobalLatencyScore());
BOOST_CHECK(orphanage->TotalOrphanUsage() <= orphanage->MaxGlobalUsage());
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 0);
// Add the large transaction. This should cause evictions of all the previous 10 transactions from that peer.
orphanage->AddTx(ptx_large, peer_large);
BOOST_CHECK_EQUAL(CheckNumEvictions(*orphanage), 10);
// peer_normal should still have 10 transactions, and peer_large should have 1.
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer_normal), 10);
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(peer_large), 1);
BOOST_CHECK(orphanage->HaveTxFromPeer(ptx_large->GetWitnessHash(), peer_large));
BOOST_CHECK_EQUAL(orphanage->CountAnnouncements(), 11);
orphanage->SanityCheck();
}
// Test that latency score includes number of inputs.
@ -432,6 +410,8 @@ BOOST_AUTO_TEST_CASE(peer_dos_limits)
// Peer 1 sent 5 of the 10 transactions with many inputs
BOOST_CHECK_EQUAL(orphanage->AnnouncementsFromPeer(1), 5);
BOOST_CHECK_EQUAL(orphanage->LatencyScoreFromPeer(1), 5 + 5 * 5);
orphanage->SanityCheck();
}
}
BOOST_AUTO_TEST_CASE(DoS_mapOrphans)