Merge bitcoin/bitcoin#32740: refactor: Header sync optimisations & simplifications
de4242f47476769d0a7f3e79e8297ed2dd60d9a4 refactor: Use reference for chain_start in HeadersSyncState (Daniela Brozzoni)
e37555e5401f9fca39ada0bd153e46b2c7ebd095 refactor: Use initializer list in CompressedHeader (Daniela Brozzoni)
0488bdfefe92b2c9a924be9244c91fe472462aab refactor: Remove unused parameter in ReportHeadersPresync (Daniela Brozzoni)
256246a9fa5b05141c93aeeb359394b9c7a80e49 refactor: Remove redundant parameter from CheckHeadersPoW (Daniela Brozzoni)
ca0243e3a6d77d2b218749f1ba113b81444e3f4a refactor: Remove useless CBlock::GetBlockHeader (Pieter Wuille)
45686522224598bed9923e60daad109094d7bc29 refactor: Use std::span in HasValidProofOfWork (Daniela Brozzoni)
4066bfe561a45f61a3c9bf24bec7f600ddcc7467 refactor: Compute work from headers without CBlockIndex (Daniela Brozzoni)
0bf6139e194f355d121bb2aea74715d1c4099598 p2p: Avoid an IsAncestorOfBestHeaderOrTip call (Pieter Wuille)

Pull request description:

This is a partial* revival of #25968. It contains a list of mostly unrelated simplifications and optimizations to the code merged in #25717:

- Avoid an IsAncestorOfBestHeaderOrTip call: just don't call this function when it won't have any effect.
- Compute work from headers without CBlockIndex: avoid constructing a CBlockIndex object just to compute work for a header, when its nBits value suffices for that. Also use some Spans where possible.
- Remove useless CBlock::GetBlockHeader: there is no need for a function to convert a CBlock to a CBlockHeader, as CBlock is a child class of CBlockHeader.

It also contains the following code cleanups, which were suggested by reviewers in #25968:

- Remove redundant parameter from CheckHeadersPoW: no need to pass consensusParams, as CheckHeadersPoW already has access to m_chainparams.GetConsensus().
- Remove unused parameter in ReportHeadersPresync.
- Use initializer list in CompressedHeader; also make GetFullHeader const.
- Use reference for chain_start in HeadersSyncState: chain_start can never be null, so it's better to pass it as a reference rather than a raw pointer.

*I decided to leave out three commits that were in #25968 (4e7ac7b94d04e056e9994ed1c8273c52b7b23931, ab52fb4e95aa2732d1a1391331ea01362e035984, 7f1cf440ca1a9c86085716745ca64d3ac26957c0), since they're a bit more involved and I'm a new contributor. If this PR gets merged, I'll comment under #25968 to note that these three commits are still up for grabs :)

ACKs for top commit:
  l0rinc: ACK de4242f47476769d0a7f3e79e8297ed2dd60d9a4
  polespinasa: re-ACK de4242f47476769d0a7f3e79e8297ed2dd60d9a4
  sipa: ACK de4242f47476769d0a7f3e79e8297ed2dd60d9a4
  achow101: ACK de4242f47476769d0a7f3e79e8297ed2dd60d9a4
  hodlinator: re-ACK de4242f47476769d0a7f3e79e8297ed2dd60d9a4

Tree-SHA512: 1de4f3ce0854a196712505f2b52ccb985856f5133769552bf37375225ea8664a3a7a6a9578c4fd461e935cd94a7cbbb08f15751a1da7651f8962c866146d9d4b
Commit b0b65336e7
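The first bullet in the description above ("Avoid an IsAncestorOfBestHeaderOrTip call") comes down to relying on short-circuit evaluation of ||. A minimal sketch, with toy functions rather than Bitcoin Core code, of why the rewritten form skips the call once the flag is already set:

    #include <cassert>

    static int g_calls = 0;
    static bool ExpensiveCheck() { ++g_calls; return true; } // stand-in for IsAncestorOfBestHeaderOrTip

    int main()
    {
        bool already_validated_work = true;
        // The right-hand side of || is not evaluated when the left side is true,
        // so the expensive call is skipped entirely.
        already_validated_work = already_validated_work || ExpensiveCheck();
        assert(g_calls == 0);

        already_validated_work = false;
        already_validated_work = already_validated_work || ExpensiveCheck();
        assert(g_calls == 1 && already_validated_work);
        return 0;
    }

The corresponding change is visible in the ProcessHeadersMessage hunk further down.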
@@ -118,12 +118,12 @@ void CBlockIndex::BuildSkip()
         pskip = pprev->GetAncestor(GetSkipHeight(nHeight));
 }

-arith_uint256 GetBlockProof(const CBlockIndex& block)
+arith_uint256 GetBitsProof(uint32_t bits)
 {
     arith_uint256 bnTarget;
     bool fNegative;
     bool fOverflow;
-    bnTarget.SetCompact(block.nBits, &fNegative, &fOverflow);
+    bnTarget.SetCompact(bits, &fNegative, &fOverflow);
     if (fNegative || fOverflow || bnTarget == 0)
         return 0;
     // We need to compute 2**256 / (bnTarget+1), but we can't represent 2**256
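For readers wondering how GetBitsProof finishes the computation the truncated comment hints at: arith_uint256 cannot hold 2^256 itself, so the long-standing trick is the identity 2^256 / (target+1) == ~target / (target+1) + 1, which holds because the 256-bit complement ~target equals 2^256 - 1 - target. A minimal, self-contained sketch of the same identity at 32-bit width, checked with ordinary 64-bit arithmetic (toy code, not the arith_uint256 implementation):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // For a 32-bit target t, floor(2^32 / (t + 1)) equals (~t / (t + 1)) + 1,
        // because the 32-bit complement ~t is 2^32 - 1 - t. GetBitsProof uses the
        // same identity at 256 bits, where 2^256 itself is not representable.
        for (uint64_t t : {0ull, 1ull, 0xffffull, 0x1d00ffffull, 0xfffffffeull, 0xffffffffull}) {
            const uint64_t via_complement = (0xffffffffull - t) / (t + 1) + 1;
            const uint64_t direct = (1ull << 32) / (t + 1);
            assert(via_complement == direct);
        }
        return 0;
    }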
src/chain.h

@@ -299,7 +299,15 @@ protected:
     CBlockIndex& operator=(CBlockIndex&&) = delete;
 };

-arith_uint256 GetBlockProof(const CBlockIndex& block);
+/** Compute how much work an nBits value corresponds to. */
+arith_uint256 GetBitsProof(uint32_t bits);
+
+/** Compute how much work a block index entry corresponds to. */
+inline arith_uint256 GetBlockProof(const CBlockIndex& block) { return GetBitsProof(block.nBits); }
+
+/** Compute how much work a block header corresponds to. */
+inline arith_uint256 GetBlockProof(const CBlockHeader& header) { return GetBitsProof(header.nBits); }
+
 /** Return the time it would take to redo the work difference between from and to, assuming the current hashrate corresponds to the difficulty at tip, in seconds. */
 int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& from, const CBlockIndex& tip, const Consensus::Params&);
 /** Find the forking point between two chain tips. */
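Why a bare nBits value is enough for GetBitsProof: nBits is a compact encoding of the 256-bit target (high byte = exponent, low 23 bits = mantissa, target = mantissa * 256^(exponent - 3)), so the expected work 2^256/(target+1) is fully determined by it and no CBlockIndex is needed. A rough floating-point sketch of the decoding (illustration only, not arith_uint256::SetCompact, which also handles the sign bit and overflow cases):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t nBits = 0x1d00ffff; // difficulty-1 target, as used by the mainnet genesis block
        const uint32_t exponent = nBits >> 24;
        const uint32_t mantissa = nBits & 0x007fffff;
        // target = mantissa * 256^(exponent - 3); double has enough exponent range for a rough value.
        const double target = std::ldexp(static_cast<double>(mantissa), 8 * (static_cast<int>(exponent) - 3));
        const double approx_work = std::ldexp(1.0, 256) / (target + 1.0); // roughly 2^32 hashes
        std::printf("exponent=%u mantissa=0x%06x target~=%g expected_work~=%g\n",
                    exponent, mantissa, target, approx_work);
        return 0;
    }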
@@ -14,18 +14,21 @@
 // CompressedHeader (we should re-calculate parameters if we compress further).
 static_assert(sizeof(CompressedHeader) == 48);

-HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus_params,
-        const HeadersSyncParams& params, const CBlockIndex* chain_start,
-        const arith_uint256& minimum_required_work) :
-    m_commit_offset((assert(params.commitment_period > 0), // HeadersSyncParams field must be initialized to non-zero.
-                     FastRandomContext().randrange(params.commitment_period))),
-    m_id(id), m_consensus_params(consensus_params),
-    m_params(params),
-    m_chain_start(chain_start),
-    m_minimum_required_work(minimum_required_work),
-    m_current_chain_work(chain_start->nChainWork),
-    m_last_header_received(m_chain_start->GetBlockHeader()),
-    m_current_height(chain_start->nHeight)
+HeadersSyncState::HeadersSyncState(NodeId id,
+                                   const Consensus::Params& consensus_params,
+                                   const HeadersSyncParams& params,
+                                   const CBlockIndex& chain_start,
+                                   const arith_uint256& minimum_required_work)
+    : m_commit_offset((assert(params.commitment_period > 0), // HeadersSyncParams field must be initialized to non-zero.
+                       FastRandomContext().randrange(params.commitment_period))),
+      m_id(id),
+      m_consensus_params(consensus_params),
+      m_params(params),
+      m_chain_start(chain_start),
+      m_minimum_required_work(minimum_required_work),
+      m_current_chain_work(chain_start.nChainWork),
+      m_last_header_received(m_chain_start.GetBlockHeader()),
+      m_current_height(chain_start.nHeight)
 {
     // Estimate the number of blocks that could possibly exist on the peer's
     // chain *right now* using 6 blocks/second (fastest blockrate given the MTP

@@ -35,7 +38,7 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus
     // exceeds this bound, because it's not possible for a consensus-valid
     // chain to be longer than this (at the current time -- in the future we
     // could try again, if necessary, to sync a longer chain).
-    const auto max_seconds_since_start{(Ticks<std::chrono::seconds>(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}))
+    const auto max_seconds_since_start{(Ticks<std::chrono::seconds>(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start.GetMedianTimePast()}}))
                                        + MAX_FUTURE_BLOCK_TIME};
     m_max_commitments = 6 * max_seconds_since_start / m_params.commitment_period;

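A side note on the constructor above: m_commit_offset is initialized with a comma-operator expression so that the precondition assert runs before the member receives its value, even though everything happens inside the initializer list. A minimal sketch of the idiom with a toy type (not Bitcoin Core code):

    #include <cassert>
    #include <cstddef>

    struct Sampler {
        // The extra parentheses make "(assert(...), value)" a single expression:
        // the assert is evaluated first, then the value after the comma
        // initializes the member.
        explicit Sampler(std::size_t period)
            : m_offset((assert(period > 0), period / 2)) {}

        std::size_t m_offset;
    };

    int main()
    {
        Sampler s{10};
        assert(s.m_offset == 5);
        return 0;
    }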
@@ -161,10 +164,10 @@ bool HeadersSyncState::ValidateAndStoreHeadersCommitments(std::span<const CBlock

     if (m_current_chain_work >= m_minimum_required_work) {
         m_redownloaded_headers.clear();
-        m_redownload_buffer_last_height = m_chain_start->nHeight;
-        m_redownload_buffer_first_prev_hash = m_chain_start->GetBlockHash();
-        m_redownload_buffer_last_hash = m_chain_start->GetBlockHash();
-        m_redownload_chain_work = m_chain_start->nChainWork;
+        m_redownload_buffer_last_height = m_chain_start.nHeight;
+        m_redownload_buffer_first_prev_hash = m_chain_start.GetBlockHash();
+        m_redownload_buffer_last_hash = m_chain_start.GetBlockHash();
+        m_redownload_chain_work = m_chain_start.nChainWork;
         m_download_state = State::REDOWNLOAD;
         LogDebug(BCLog::NET, "Initial headers sync transition with peer=%d: reached sufficient work at height=%i, redownloading from height=%i\n", m_id, m_current_height, m_redownload_buffer_last_height);
     }

@@ -202,7 +205,7 @@ bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& curren
         }
     }

-    m_current_chain_work += GetBlockProof(CBlockIndex(current));
+    m_current_chain_work += GetBlockProof(current);
     m_last_header_received = current;
     m_current_height = next_height;

@@ -228,7 +231,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
     if (!m_redownloaded_headers.empty()) {
         previous_nBits = m_redownloaded_headers.back().nBits;
     } else {
-        previous_nBits = m_chain_start->nBits;
+        previous_nBits = m_chain_start.nBits;
     }

     if (!PermittedDifficultyTransition(m_consensus_params, next_height,

@@ -238,7 +241,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
     }

     // Track work on the redownloaded chain
-    m_redownload_chain_work += GetBlockProof(CBlockIndex(header));
+    m_redownload_chain_work += GetBlockProof(header);

     if (m_redownload_chain_work >= m_minimum_required_work) {
         m_process_all_remaining_headers = true;

@@ -295,7 +298,7 @@ CBlockLocator HeadersSyncState::NextHeadersRequestLocator() const
     Assume(m_download_state != State::FINAL);
     if (m_download_state == State::FINAL) return {};

-    auto chain_start_locator = LocatorEntries(m_chain_start);
+    auto chain_start_locator = LocatorEntries(&m_chain_start);
     std::vector<uint256> locator;

     if (m_download_state == State::PRESYNC) {
@@ -31,16 +31,17 @@ struct CompressedHeader {
         hashMerkleRoot.SetNull();
     }

-    CompressedHeader(const CBlockHeader& header)
+    explicit CompressedHeader(const CBlockHeader& header)
+        : nVersion{header.nVersion},
+          hashMerkleRoot{header.hashMerkleRoot},
+          nTime{header.nTime},
+          nBits{header.nBits},
+          nNonce{header.nNonce}
     {
-        nVersion = header.nVersion;
-        hashMerkleRoot = header.hashMerkleRoot;
-        nTime = header.nTime;
-        nBits = header.nBits;
-        nNonce = header.nNonce;
     }

-    CBlockHeader GetFullHeader(const uint256& hash_prev_block) {
+    CBlockHeader GetFullHeader(const uint256& hash_prev_block) const
+    {
         CBlockHeader ret;
         ret.nVersion = nVersion;
         ret.hashPrevBlock = hash_prev_block;

@@ -136,8 +137,8 @@ public:
     * minimum_required_work: amount of chain work required to accept the chain
     */
    HeadersSyncState(NodeId id, const Consensus::Params& consensus_params,
-           const HeadersSyncParams& params, const CBlockIndex* chain_start,
-           const arith_uint256& minimum_required_work);
+           const HeadersSyncParams& params, const CBlockIndex& chain_start,
+           const arith_uint256& minimum_required_work);

    /** Result data structure for ProcessNextHeaders. */
    struct ProcessingResult {

@@ -219,7 +220,7 @@ private:
    const HeadersSyncParams m_params;

    /** Store the last block in our block index that the peer's chain builds from */
-   const CBlockIndex* m_chain_start{nullptr};
+   const CBlockIndex& m_chain_start;

    /** Minimum work that we're looking for on this chain. */
    const arith_uint256 m_minimum_required_work;
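The CompressedHeader change above bundles three small C++ hygiene tweaks: the converting constructor becomes explicit, the members are set in the initializer list rather than assigned in the body, and GetFullHeader becomes callable on const objects. A toy analogue (hypothetical Source/Compressed types, not the real structs):

    #include <cstdint>

    struct Source {
        int32_t version;
        uint32_t time;
    };

    struct Compressed {
        // explicit: blocks accidental implicit conversions such as "Compressed c = source;"
        explicit Compressed(const Source& s) : m_version{s.version}, m_time{s.time} {}

        // const: usable on const Compressed objects, matching GetFullHeader() const
        Source Expand() const { return Source{m_version, m_time}; }

        int32_t m_version;
        uint32_t m_time;
    };

    int main()
    {
        const Source s{2, 1296688602};
        const Compressed c{s}; // direct-initialization is still fine with an explicit constructor
        return c.Expand().version == 2 ? 0 : 1;
    }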
@@ -29,7 +29,7 @@ std::vector<bool> BytesToBits(const std::vector<unsigned char>& bytes)

 CMerkleBlock::CMerkleBlock(const CBlock& block, CBloomFilter* filter, const std::set<Txid>* txids)
 {
-    header = block.GetBlockHeader();
+    header = static_cast<const CBlockHeader&>(block);

     std::vector<bool> vMatch;
     std::vector<Txid> vHashes;
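The static_cast above is all that is needed because CBlock publicly derives from CBlockHeader, so a block already is a header; the removed GetBlockHeader() helper only duplicated the implicit derived-to-base conversion. A self-contained sketch with toy types (not the real classes):

    #include <cassert>
    #include <string>

    struct Header {
        int version{0};
        unsigned bits{0};
    };

    struct Block : Header {
        std::string body;
    };

    int main()
    {
        Block b;
        b.version = 2;
        b.bits = 0x1d00ffff;
        b.body = "transactions...";

        const Header& view = b;                            // base view, no copy
        const Header copy = static_cast<const Header&>(b); // slicing copy keeps only the header fields
        assert(view.version == 2 && copy.bits == 0x1d00ffff);
        return 0;
    }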
@@ -655,7 +655,7 @@
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
    /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */
    /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */
-   bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
+   bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, Peer& peer);
    /** Calculate an anti-DoS work threshold for headers chains */
    arith_uint256 GetAntiDoSWorkThreshold();
    /** Deal with state tracking and headers sync for peers that send

@@ -697,8 +697,8 @@ private:
     * calling); false otherwise.
     */
    bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
-                              const CBlockIndex* chain_start_header,
-                              std::vector<CBlockHeader>& headers)
+                              const CBlockIndex& chain_start_header,
+                              std::vector<CBlockHeader>& headers)
        EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);

    /** Return true if the given header is an ancestor of

@@ -2584,10 +2584,10 @@ void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlo
     MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp);
 }

-bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
+bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, Peer& peer)
 {
     // Do these headers have proof-of-work matching what's claimed?
-    if (!HasValidProofOfWork(headers, consensusParams)) {
+    if (!HasValidProofOfWork(headers, m_chainparams.GetConsensus())) {
         Misbehaving(peer, "header with invalid proof of work");
         return false;
     }

@@ -2732,10 +2732,10 @@ bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfro
     return false;
 }

-bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
+bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex& chain_start_header, std::vector<CBlockHeader>& headers)
 {
     // Calculate the claimed total work on this chain.
-    arith_uint256 total_work = chain_start_header->nChainWork + CalculateClaimedHeadersWork(headers);
+    arith_uint256 total_work = chain_start_header.nChainWork + CalculateClaimedHeadersWork(headers);

     // Our dynamic anti-DoS threshold (minimum work required on a headers chain
     // before we'll store it)

@@ -2766,7 +2766,7 @@ bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlo
         // handled inside of IsContinuationOfLowWorkHeadersSync.
         (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
     } else {
-        LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
+        LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header.nHeight + headers.size(), pfrom.GetId());
     }

     // The peer has not yet given us a chain that meets our work threshold,

@@ -2952,7 +2952,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
     // We'll rely on headers having valid proof-of-work further down, as an
     // anti-DoS criteria (note: this check is required before passing any
     // headers into HeadersSyncState).
-    if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
+    if (!CheckHeadersPoW(headers, peer)) {
         // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
         // just return. (Note that even if a header is announced via compact
         // block, the header itself should be valid, so this type of error can

@@ -3018,9 +3018,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
     {
         LOCK(cs_main);
         last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
-        if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
-            already_validated_work = true;
-        }
+        already_validated_work = already_validated_work || IsAncestorOfBestHeaderOrTip(last_received_header);
     }

     // If our peer has NetPermissionFlags::NoBan privileges, then bypass our

@@ -3034,7 +3032,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
     // Do anti-DoS checks to determine if we should process or store for later
     // processing.
     if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
-                chain_start_header, headers)) {
+                *chain_start_header, headers)) {
         // If we successfully started a low-work headers sync, then there
         // should be no headers to process any further.
         Assume(headers.empty());

@@ -4548,7 +4546,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
            MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
        }
        return;
-   } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({{cmpctblock.header}}) < GetAntiDoSWorkThreshold()) {
+   } else if (prev_block->nChainWork + GetBlockProof(cmpctblock.header) < GetAntiDoSWorkThreshold()) {
        // If we get a low-work header in a compact block, we can ignore it.
        LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
        return;

@@ -4820,7 +4818,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
            if (it != m_headers_presync_stats.end()) stats = it->second;
        }
        if (stats.second) {
-           m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second);
+           m_chainman.ReportHeadersPresync(stats.second->first, stats.second->second);
        }
    }

@@ -4866,7 +4864,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
        mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));

        // Check claimed work on this block against our anti-dos thresholds.
-       if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({{pblock->GetBlockHeader()}}) >= GetAntiDoSWorkThreshold()) {
+       if (prev_block && prev_block->nChainWork + GetBlockProof(*pblock) >= GetAntiDoSWorkThreshold()) {
            min_pow_checked = true;
        }
    }
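One pattern worth calling out in the hunks above: TryLowWorkHeadersSync now takes chain_start_header by reference, and the caller, which has already null-checked the pointer, simply dereferences it at the call site (*chain_start_header). A toy sketch of that pointer-to-reference migration (hypothetical Node/HeightOf names, not the real types):

    #include <cassert>

    struct Node {
        int height{0};
    };

    // Takes a reference: a caller can no longer hand over a null pointer,
    // so no null check is needed (or possible) inside.
    static int HeightOf(const Node& start) { return start.height; }

    int main()
    {
        Node n{42};
        const Node* maybe_start = &n;

        int height = -1;
        if (maybe_start) {                    // the caller checks once...
            height = HeightOf(*maybe_start);  // ...and dereferences at the call site
        }
        assert(height == 42);
        return 0;
    }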
@@ -101,18 +101,6 @@ public:
         m_checked_merkle_root = false;
     }

-    CBlockHeader GetBlockHeader() const
-    {
-        CBlockHeader block;
-        block.nVersion = nVersion;
-        block.hashPrevBlock = hashPrevBlock;
-        block.hashMerkleRoot = hashMerkleRoot;
-        block.nTime = nTime;
-        block.nBits = nBits;
-        block.nNonce = nNonce;
-        return block;
-    }
-
     std::string ToString() const;
 };

@@ -102,10 +102,9 @@ bool BuildChainTestingSetup::BuildChain(const CBlockIndex* pindex,
     chain.resize(length);
     for (auto& block : chain) {
         block = std::make_shared<CBlock>(CreateBlock(pindex, no_txns, coinbase_script_pub_key));
-        CBlockHeader header = block->GetBlockHeader();

         BlockValidationState state;
-        if (!Assert(m_node.chainman)->ProcessNewBlockHeaders({{header}}, true, state, &pindex)) {
+        if (!Assert(m_node.chainman)->ProcessNewBlockHeaders({{*block}}, true, state, &pindex)) {
             return false;
         }
     }
@@ -33,10 +33,10 @@ FUZZ_TARGET(block_header)
         mut_block_header.SetNull();
         assert(mut_block_header.IsNull());
         CBlock block{*block_header};
-        assert(block.GetBlockHeader().GetHash() == block_header->GetHash());
+        assert(block.GetHash() == block_header->GetHash());
         (void)block.ToString();
         block.SetNull();
-        assert(block.GetBlockHeader().GetHash() == mut_block_header.GetHash());
+        assert(block.GetHash() == mut_block_header.GetHash());
     }
     {
         std::optional<CBlockLocator> block_locator = ConsumeDeserializable<CBlockLocator>(fuzzed_data_provider);
@@ -44,7 +44,7 @@ class FuzzedHeadersSyncState : public HeadersSyncState
 {
 public:
     FuzzedHeadersSyncState(const HeadersSyncParams& sync_params, const size_t commit_offset,
-                           const CBlockIndex* chain_start, const arith_uint256& minimum_required_work)
+                           const CBlockIndex& chain_start, const arith_uint256& minimum_required_work)
         : HeadersSyncState(/*id=*/0, Params().GetConsensus(), sync_params, chain_start, minimum_required_work)
     {
         const_cast<size_t&>(m_commit_offset) = commit_offset;

@@ -74,7 +74,7 @@ FUZZ_TARGET(headers_sync_state, .init = initialize_headers_sync_state_fuzz)
     FuzzedHeadersSyncState headers_sync(
         params,
         /*commit_offset=*/fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, params.commitment_period - 1),
-        /*chain_start=*/&start_index,
+        /*chain_start=*/start_index,
         /*minimum_required_work=*/min_work);

     // Store headers for potential redownload phase.
@@ -89,7 +89,7 @@ void initialize_chain()
     auto& chainman{*setup->m_node.chainman};
     for (const auto& block : chain) {
         BlockValidationState dummy;
-        bool processed{chainman.ProcessNewBlockHeaders({{block->GetBlockHeader()}}, true, dummy)};
+        bool processed{chainman.ProcessNewBlockHeaders({{*block}}, true, dummy)};
         Assert(processed);
         const auto* index{WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block->GetHash()))};
         Assert(index);

@@ -171,7 +171,7 @@ void utxo_snapshot_fuzz(FuzzBufferType buffer)
     if constexpr (!INVALID) {
         for (const auto& block : *g_chain) {
             BlockValidationState dummy;
-            bool processed{chainman.ProcessNewBlockHeaders({{block->GetBlockHeader()}}, true, dummy)};
+            bool processed{chainman.ProcessNewBlockHeaders({{*block}}, true, dummy)};
             Assert(processed);
             const auto* index{WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block->GetHash()))};
             Assert(index);
@@ -52,7 +52,7 @@ constexpr size_t COMMITMENT_PERIOD{600}; // Somewhat close to mainnet.

 struct HeadersGeneratorSetup : public RegTestingSetup {
     const CBlock& genesis{Params().GenesisBlock()};
-    const CBlockIndex* chain_start{WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(genesis.GetHash()))};
+    CBlockIndex& chain_start{WITH_LOCK(::cs_main, return *Assert(m_node.chainman->m_blockman.LookupBlockIndex(genesis.GetHash())))};

     // Generate headers for two different chains (using differing merkle roots
     // to ensure the headers are different).
@@ -105,7 +105,7 @@ std::shared_ptr<CBlock> MinerTestingSetup::FinalizeBlock(std::shared_ptr<CBlock>
     // submit block header, so that miner can get the block height from the
     // global state and the node has the topology of the chain
     BlockValidationState ignored;
-    BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders({{pblock->GetBlockHeader()}}, true, ignored));
+    BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders({{*pblock}}, true, ignored));

     return pblock;
 }
@@ -4079,10 +4079,10 @@ std::vector<unsigned char> ChainstateManager::GenerateCoinbaseCommitment(CBlock&
     return commitment;
 }

-bool HasValidProofOfWork(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams)
+bool HasValidProofOfWork(std::span<const CBlockHeader> headers, const Consensus::Params& consensusParams)
 {
-    return std::all_of(headers.cbegin(), headers.cend(),
-            [&](const auto& header) { return CheckProofOfWork(header.GetHash(), header.nBits, consensusParams);});
+    return std::ranges::all_of(headers,
+            [&](const auto& header) { return CheckProofOfWork(header.GetHash(), header.nBits, consensusParams); });
 }

 bool IsBlockMutated(const CBlock& block, bool check_witness_root)

@@ -4120,8 +4120,7 @@ arith_uint256 CalculateClaimedHeadersWork(std::span<const CBlockHeader> headers)
 {
     arith_uint256 total_work{0};
     for (const CBlockHeader& header : headers) {
-        CBlockIndex dummy(header);
-        total_work += GetBlockProof(dummy);
+        total_work += GetBlockProof(header);
     }
     return total_work;
 }

@@ -4331,7 +4330,7 @@ bool ChainstateManager::ProcessNewBlockHeaders(std::span<const CBlockHeader> hea
     return true;
 }

-void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t height, int64_t timestamp)
+void ChainstateManager::ReportHeadersPresync(int64_t height, int64_t timestamp)
 {
     AssertLockNotHeld(GetMutex());
     {
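The signature change above (const std::vector<CBlockHeader>& to std::span<const CBlockHeader>) is non-breaking for existing callers: a span of const elements binds to a vector, a std::array, or a plain array without copying, and std::ranges::all_of drops the begin()/end() boilerplate. A small self-contained sketch with a toy Header type (not the real CBlockHeader):

    #include <algorithm>
    #include <array>
    #include <cassert>
    #include <span>
    #include <vector>

    struct Header {
        unsigned bits{0};
    };

    // A span of const elements binds to any contiguous container of Header
    // without copying; ranges::all_of takes the range directly.
    static bool AllHaveNonZeroBits(std::span<const Header> headers)
    {
        return std::ranges::all_of(headers, [](const Header& h) { return h.bits != 0; });
    }

    int main()
    {
        const std::vector<Header> from_vector{{0x1d00ffff}, {0x1715a35c}};
        const std::array<Header, 1> from_array{{{0x1d00ffff}}};
        const std::vector<Header> with_zero{{0x1d00ffff}, {0u}};

        assert(AllHaveNonZeroBits(from_vector));
        assert(AllHaveNonZeroBits(from_array));
        assert(!AllHaveNonZeroBits(with_zero));
        return 0;
    }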
@@ -413,8 +413,8 @@ BlockValidationState TestBlockValidity(
     bool check_pow,
     bool check_merkle_root) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

-/** Check with the proof of work on each blockheader matches the value in nBits */
-bool HasValidProofOfWork(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams);
+/** Check that the proof of work on each blockheader matches the value in nBits */
+bool HasValidProofOfWork(std::span<const CBlockHeader> headers, const Consensus::Params& consensusParams);

 /** Check if a block has been mutated (with respect to its merkle root and witness commitments). */
 bool IsBlockMutated(const CBlock& block, bool check_witness_root);

@@ -1298,7 +1298,7 @@ public:
     * headers are not yet fed to validation during that time, but validation is (for now)
     * responsible for logging and signalling through NotifyHeaderTip, so it needs this
     * information. */
-    void ReportHeadersPresync(const arith_uint256& work, int64_t height, int64_t timestamp);
+    void ReportHeadersPresync(int64_t height, int64_t timestamp);

     //! When starting up, search the datadir for a chainstate based on a UTXO
     //! snapshot that is in the process of being validated and load it if found.