Merge bitcoin/bitcoin#33738: log: avoid collecting GetSerializeSize data when compact block logging is disabled

969c840db52da796c319f84c9a9a20b1de902ccf log,blocks: avoid `ComputeTotalSize` and `GetHash` work when logging is disabled (Lőrinc)
babfda332b6aa41143eb694193358ef2c76ebefe log,net: avoid `ComputeTotalSize` when logging is disabled (Lőrinc)
1658b8f82b99f9ba3b42a50ba1f72b19a38c8e75 refactor: rename `CTransaction::GetTotalSize` to signal that it's not cached (Lőrinc)

Pull request description:

  ### Context

  The new accounting options introduced in https://github.com/bitcoin/bitcoin/pull/32582 can be quite heavy, and are not needed when debug logging is disabled.

  ### Problem
  `PartiallyDownloadedBlock::FillBlock()` and `PeerManagerImpl::SendBlockTransactions()` accumulate transaction sizes for debug logging by calling `ComputeTotalSize()` in loops, and each call performs an expensive `GetSerializeSize()` serialization.
  Similarly, the block header hash is computed only for the debug log.
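
  For reference, `ComputeTotalSize()` (named `GetTotalSize()` before this PR) is just a full serialization pass, as the rename diff further down shows, so calling it per transaction in a loop effectively re-serializes every transaction involved:

  ```cpp
  // src/primitives/transaction.cpp -- the body is unchanged by this PR, only the
  // name changes; every call walks and serializes the entire transaction,
  // including witness data.
  unsigned int CTransaction::ComputeTotalSize() const
  {
      return ::GetSerializeSize(TX_WITH_WITNESS(*this));
  }
  ```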

  ### Fixes

  Guard the size and hash calculations with `LogAcceptCategory()` checks so the serialization and hashing work only occurs when compact block debug logging is enabled.
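
  The guard pattern itself, as a minimal self-contained sketch (the `Tx`, `DebugLogEnabled()` and `ComputeTotalSize()` names below are toy stand-ins for `CTransactionRef`, `LogAcceptCategory()` and the real member function, so the snippet compiles on its own; it illustrates the idea rather than reproducing the production code):

  ```cpp
  #include <cstdio>
  #include <vector>

  // Toy stand-ins for illustration only; the real code uses CTransactionRef,
  // CTransaction::ComputeTotalSize() and LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug).
  struct Tx { std::vector<unsigned char> data; };
  static bool DebugLogEnabled() { return false; }                         // stands in for LogAcceptCategory()
  static size_t ComputeTotalSize(const Tx& tx) { return tx.data.size(); } // pretend this is a costly serialization

  static void ReportReconstructed(const std::vector<Tx>& vtx_missing)
  {
      // The fix in a nutshell: only pay for the per-transaction size accounting
      // (and, in the real FillBlock(), the header hash) when the log line will
      // actually be emitted.
      if (DebugLogEnabled()) {
          size_t tx_missing_size{0};
          for (const auto& tx : vtx_missing) tx_missing_size += ComputeTotalSize(tx);
          std::printf("reconstructed block: %zu txn (%zu bytes) requested\n",
                      vtx_missing.size(), tx_missing_size);
      }
  }

  int main()
  {
      ReportReconstructed({Tx{{0x01, 0x02}}, Tx{{0x03}}});
  }
  ```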

  Also modernized the surrounding code a bit since the change is quite trivial.

  ### Reproducer

  You can test the change by starting an up-to-date `bitcoind` node with `-debug=cmpctblock` and observing compact block log lines such as:

  > [cmpctblock] Successfully reconstructed block 00000000000000000001061eaa6c0fe79258e7f79606e67ac495765cb121a520 with 1 txn prefilled, 3122 txn from mempool (incl at least 3 from extra pool) and 641 txn (352126 bytes) requested

  <details>
  <summary>Test patch</summary>

  ```patch
  diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp
  index 58620c93cc..f16eb38fa5 100644
  --- a/src/blockencodings.cpp
  +++ b/src/blockencodings.cpp
  @@ -186,6 +186,7 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const

   ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing, bool segwit_active)
   {
  +    LogInfo("PartiallyDownloadedBlock::FillBlock called");
       if (header.IsNull()) return READ_STATUS_INVALID;

       block = header;
  @@ -218,6 +219,7 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
       }

       if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) {
  +        LogInfo("debug log enabled");
           const uint256 hash{block.GetHash()}; // avoid cleared header
           uint32_t tx_missing_size{0};
           for (const auto& tx : vtx_missing) tx_missing_size += tx->ComputeTotalSize(); // avoid cleared txn_available
  diff --git a/src/net_processing.cpp b/src/net_processing.cpp
  index 5600c8d389..c081825f77 100644
  --- a/src/net_processing.cpp
  +++ b/src/net_processing.cpp
  @@ -2470,6 +2470,7 @@ uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const

   void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
   {
  +    LogInfo("PeerManagerImpl::SendBlockTransactions called");
       BlockTransactions resp(req);
       for (size_t i = 0; i < req.indexes.size(); i++) {
           if (req.indexes[i] >= block.vtx.size()) {
  @@ -2480,6 +2481,7 @@ void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlo
       }

       if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) {
  +        LogInfo("debug log enabled");
           uint32_t tx_requested_size{0};
           for (const auto i : req.indexes) tx_requested_size += block.vtx[i]->ComputeTotalSize();
           LogDebug(BCLog::CMPCTBLOCK, "Peer %d sent us a GETBLOCKTXN for block %s, sending a BLOCKTXN with %u txns. (%u bytes)\n", pfrom.GetId(), block.GetHash().ToString(), resp.txn.size(), tx_requested_size);
  ```

  </details>

ACKs for top commit:
  davidgumberg:
    reACK 969c840db5
  achow101:
    ACK 969c840db52da796c319f84c9a9a20b1de902ccf
  hodlinator:
    re-ACK 969c840db52da796c319f84c9a9a20b1de902ccf
  sedited:
    Re-ACK 969c840db52da796c319f84c9a9a20b1de902ccf
  danielabrozzoni:
    reACK 969c840db52da796c319f84c9a9a20b1de902ccf

Tree-SHA512: 9780102d29778165144e3602d934ed4cb96660fd7b9ff2581b223c619e419139b8348e60f226af448702ae527736a1806d169b44342c5a82795590f664e16efe
Ava Chow 2026-01-20 15:41:30 -08:00
commit a365c9fe1f
GPG Key ID: 17565732E08E5E41
10 changed files with 31 additions and 25 deletions

```diff
@@ -58,7 +58,7 @@ static void DuplicateInputs(benchmark::Bench& bench)
     naughtyTx.vout[0].nValue = 0;
     naughtyTx.vout[0].scriptPubKey = SCRIPT_PUB;
-    uint64_t n_inputs = (((MAX_BLOCK_SERIALIZED_SIZE / WITNESS_SCALE_FACTOR) - (CTransaction(coinbaseTx).GetTotalSize() + CTransaction(naughtyTx).GetTotalSize())) / 41) - 100;
+    uint64_t n_inputs = (((MAX_BLOCK_SERIALIZED_SIZE / WITNESS_SCALE_FACTOR) - (CTransaction(coinbaseTx).ComputeTotalSize() + CTransaction(naughtyTx).ComputeTotalSize())) / 41) - 100;
     for (uint64_t x = 0; x < (n_inputs - 1); ++x) {
         naughtyTx.vin.emplace_back(Txid::FromUint256(GetRandHash()), 0, CScript(), 0);
     }
```

```diff
@@ -192,40 +192,44 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
 {
     if (header.IsNull()) return READ_STATUS_INVALID;
-    uint256 hash = header.GetHash();
     block = header;
     block.vtx.resize(txn_available.size());
-    unsigned int tx_missing_size = 0;
     size_t tx_missing_offset = 0;
     for (size_t i = 0; i < txn_available.size(); i++) {
         if (!txn_available[i]) {
-            if (vtx_missing.size() <= tx_missing_offset)
+            if (tx_missing_offset >= vtx_missing.size()) {
                 return READ_STATUS_INVALID;
+            }
             block.vtx[i] = vtx_missing[tx_missing_offset++];
-            tx_missing_size += block.vtx[i]->GetTotalSize();
-        } else
+        } else {
             block.vtx[i] = std::move(txn_available[i]);
+        }
     }
     // Make sure we can't call FillBlock again.
     header.SetNull();
     txn_available.clear();
-    if (vtx_missing.size() != tx_missing_offset)
+    if (vtx_missing.size() != tx_missing_offset) {
         return READ_STATUS_INVALID;
+    }
     // Check for possible mutations early now that we have a seemingly good block
     IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? m_check_block_mutated_mock : IsBlockMutated};
-    if (check_mutated(/*block=*/block,
-                      /*check_witness_root=*/segwit_active)) {
+    if (check_mutated(/*block=*/block, /*check_witness_root=*/segwit_active)) {
         return READ_STATUS_FAILED; // Possible Short ID collision
     }
-    LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %u txn prefilled, %u txn from mempool (incl at least %u from extra pool) and %u txn (%u bytes) requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size(), tx_missing_size);
-    if (vtx_missing.size() < 5) {
-        for (const auto& tx : vtx_missing) {
-            LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
+    if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) {
+        const uint256 hash{block.GetHash()}; // avoid cleared header
+        uint32_t tx_missing_size{0};
+        for (const auto& tx : vtx_missing) tx_missing_size += tx->ComputeTotalSize(); // avoid cleared txn_available
+        LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %u txn prefilled, %u txn from mempool (incl at least %u from extra pool) and %u txn (%u bytes) requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size(), tx_missing_size);
+        if (vtx_missing.size() < 5) {
+            for (const auto& tx : vtx_missing) {
+                LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
+            }
         }
     }
```

```diff
@@ -436,7 +436,7 @@ void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry
     entry.pushKV("txid", tx.GetHash().GetHex());
     entry.pushKV("hash", tx.GetWitnessHash().GetHex());
     entry.pushKV("version", tx.version);
-    entry.pushKV("size", tx.GetTotalSize());
+    entry.pushKV("size", tx.ComputeTotalSize());
     entry.pushKV("vsize", (GetTransactionWeight(tx) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR);
     entry.pushKV("weight", GetTransactionWeight(tx));
     entry.pushKV("locktime", (int64_t)tx.nLockTime);
```

```diff
@@ -2570,17 +2570,19 @@ uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
 void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
 {
     BlockTransactions resp(req);
-    unsigned int tx_requested_size = 0;
     for (size_t i = 0; i < req.indexes.size(); i++) {
         if (req.indexes[i] >= block.vtx.size()) {
             Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
             return;
         }
         resp.txn[i] = block.vtx[req.indexes[i]];
-        tx_requested_size += resp.txn[i]->GetTotalSize();
     }
-    LogDebug(BCLog::CMPCTBLOCK, "Peer %d sent us a GETBLOCKTXN for block %s, sending a BLOCKTXN with %u txns. (%u bytes)\n", pfrom.GetId(), block.GetHash().ToString(), resp.txn.size(), tx_requested_size);
+    if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) {
+        uint32_t tx_requested_size{0};
+        for (const auto& tx : resp.txn) tx_requested_size += tx->ComputeTotalSize();
+        LogDebug(BCLog::CMPCTBLOCK, "Peer %d sent us a GETBLOCKTXN for block %s, sending a BLOCKTXN with %u txns. (%u bytes)\n", pfrom.GetId(), block.GetHash().ToString(), resp.txn.size(), tx_requested_size);
+    }
     MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp);
 }
```

```diff
@@ -107,7 +107,7 @@ CAmount CTransaction::GetValueOut() const
     return nValueOut;
 }
-unsigned int CTransaction::GetTotalSize() const
+unsigned int CTransaction::ComputeTotalSize() const
 {
     return ::GetSerializeSize(TX_WITH_WITNESS(*this));
 }
```

```diff
@@ -332,11 +332,11 @@ public:
     CAmount GetValueOut() const;
     /**
-     * Get the total transaction size in bytes, including witness data.
+     * Calculate the total transaction size in bytes, including witness data.
      * "Total Size" defined in BIP141 and BIP144.
      * @return Total transaction size in bytes
      */
-    unsigned int GetTotalSize() const;
+    unsigned int ComputeTotalSize() const;
     bool IsCoinBase() const
     {
```
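
The doc-comment change from "Get" to "Calculate", together with the rename, spells out that the value is recomputed via a full serialization on every call. A hypothetical caller fragment (not part of this commit) showing how the result would be cached when it is needed more than once:

```cpp
// Hypothetical fragment, not from this commit: ComputeTotalSize() re-serializes
// the whole transaction each time, so store the result rather than re-calling it.
const unsigned int total_size{tx.ComputeTotalSize()};
LogDebug(BCLog::CMPCTBLOCK, "tx %s is %u bytes\n", tx.GetHash().ToString(), total_size);
```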

```diff
@@ -279,7 +279,7 @@ QString TransactionDesc::toHTML(interfaces::Node& node, interfaces::Wallet& wall
         strHTML += "<br><b>" + tr("Comment") + ":</b><br>" + GUIUtil::HtmlEscape(wtx.value_map["comment"], true) + "<br>";
     strHTML += "<b>" + tr("Transaction ID") + ":</b> " + rec->getTxHash() + "<br>";
-    strHTML += "<b>" + tr("Transaction total size") + ":</b> " + QString::number(wtx.tx->GetTotalSize()) + " bytes<br>";
+    strHTML += "<b>" + tr("Transaction total size") + ":</b> " + QString::number(wtx.tx->ComputeTotalSize()) + " bytes<br>";
     strHTML += "<b>" + tr("Transaction virtual size") + ":</b> " + QString::number(GetVirtualTransactionSize(*wtx.tx)) + " bytes<br>";
     strHTML += "<b>" + tr("Output index") + ":</b> " + QString::number(rec->getOutputIndex()) + "<br>";
```

```diff
@@ -2053,7 +2053,7 @@ static RPCHelpMan getblockstats()
         int64_t tx_size = 0;
         if (do_calculate_size) {
-            tx_size = tx->GetTotalSize();
+            tx_size = tx->ComputeTotalSize();
             if (do_mediantxsize) {
                 txsize_array.push_back(tx_size);
             }
```

```diff
@@ -1031,7 +1031,7 @@ static UniValue OrphanToJSON(const node::TxOrphanage::OrphanInfo& orphan)
     UniValue o(UniValue::VOBJ);
     o.pushKV("txid", orphan.tx->GetHash().ToString());
     o.pushKV("wtxid", orphan.tx->GetWitnessHash().ToString());
-    o.pushKV("bytes", orphan.tx->GetTotalSize());
+    o.pushKV("bytes", orphan.tx->ComputeTotalSize());
     o.pushKV("vsize", GetVirtualTransactionSize(*orphan.tx));
     o.pushKV("weight", GetTransactionWeight(*orphan.tx));
     UniValue from(UniValue::VARR);
```

```diff
@@ -68,7 +68,7 @@ FUZZ_TARGET(transaction, .init = initialize_transaction)
     }
     (void)tx.GetHash();
-    (void)tx.GetTotalSize();
+    (void)tx.ComputeTotalSize();
     try {
         (void)tx.GetValueOut();
     } catch (const std::runtime_error&) {
@@ -92,7 +92,7 @@ FUZZ_TARGET(transaction, .init = initialize_transaction)
     (void)AreInputsStandard(tx, coins_view_cache);
     (void)IsWitnessStandard(tx, coins_view_cache);
-    if (tx.GetTotalSize() < 250'000) { // Avoid high memory usage (with msan) due to json encoding
+    if (tx.ComputeTotalSize() < 250'000) { // Avoid high memory usage (with msan) due to json encoding
         {
             UniValue u{UniValue::VOBJ};
             TxToUniv(tx, /*block_hash=*/uint256::ZERO, /*entry=*/u);
```