rpc/net: report per-peer last_inv_sequence

This commit is contained in:
Anthony Towns 2025-09-17 12:17:41 +10:00
parent adefb51c54
commit 77b2ebb811
4 changed files with 11 additions and 5 deletions

View File

@@ -312,7 +312,7 @@ struct Peer {
std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
/** The mempool sequence num at which we sent the last `inv` message to this peer.
* Can relay txs with lower sequence numbers than this (see CTxMemPool::info_for_relay). */
uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1};
uint64_t m_last_inv_sequence GUARDED_BY(m_tx_inventory_mutex){1};
/** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */
std::atomic<CAmount> m_fee_filter_received{0};
@@ -942,7 +942,7 @@ private:
/** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */
CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, NetEventsInterface::g_msgproc_mutex);
EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, !tx_relay.m_tx_inventory_mutex);
void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)
@@ -1728,7 +1728,9 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs);
stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
stats.m_inv_to_send = WITH_LOCK(tx_relay->m_tx_inventory_mutex, return tx_relay->m_tx_inventory_to_send.size());
LOCK(tx_relay->m_tx_inventory_mutex);
stats.m_last_inv_seq = tx_relay->m_last_inv_sequence;
stats.m_inv_to_send = tx_relay->m_tx_inventory_to_send.size();
} else {
stats.m_relay_txs = false;
stats.m_fee_filter_received = 0;
@@ -2364,8 +2366,8 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
{
auto txinfo{std::visit(
[&](const auto& id) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex) {
return m_mempool.info_for_relay(id, tx_relay.m_last_inv_sequence);
[&](const auto& id) {
return m_mempool.info_for_relay(id, WITH_LOCK(tx_relay.m_tx_inventory_mutex, return tx_relay.m_last_inv_sequence));
},
gtxid)};

View File

@@ -55,6 +55,7 @@ struct CNodeStateStats {
std::vector<int> vHeightInFlight;
bool m_relay_txs;
int m_inv_to_send = 0;
uint64_t m_last_inv_seq{0};
CAmount m_fee_filter_received;
uint64_t m_addr_processed = 0;
uint64_t m_addr_rate_limited = 0;

View File

@@ -142,6 +142,7 @@ static RPCHelpMan getpeerinfo()
{RPCResult::Type::STR, "SERVICE_NAME", "the service name if it is recognised"}
}},
{RPCResult::Type::BOOL, "relaytxes", "Whether we relay transactions to this peer"},
{RPCResult::Type::NUM, "last_inv_sequence", "Mempool sequence number of this peer's last INV"},
{RPCResult::Type::NUM, "inv_to_send", "How many txs we have queued to announce to this peer"},
{RPCResult::Type::NUM_TIME, "lastsend", "The " + UNIX_EPOCH_TIME + " of the last send"},
{RPCResult::Type::NUM_TIME, "lastrecv", "The " + UNIX_EPOCH_TIME + " of the last receive"},
@@ -239,6 +240,7 @@ static RPCHelpMan getpeerinfo()
obj.pushKV("services", strprintf("%016x", services));
obj.pushKV("servicesnames", GetServicesNames(services));
obj.pushKV("relaytxes", statestats.m_relay_txs);
obj.pushKV("last_inv_sequence", statestats.m_last_inv_seq);
obj.pushKV("inv_to_send", statestats.m_inv_to_send);
obj.pushKV("lastsend", count_seconds(stats.m_last_send));
obj.pushKV("lastrecv", count_seconds(stats.m_last_recv));

View File

@@ -167,6 +167,7 @@ class NetTest(BitcoinTestFramework):
"presynced_headers": -1,
"relaytxes": False,
"inv_to_send": 0,
"last_inv_sequence": 0,
"services": "0000000000000000",
"servicesnames": [],
"session_id": "" if not self.options.v2transport else no_version_peer.v2_state.peer['session_id'].hex(),