mirror of
https://github.com/bitcoin/bitcoin.git
synced 2026-01-31 18:51:12 +00:00
Merge bitcoin/bitcoin#29640: Fix tiebreak when loading blocks from disk (and add tests for comparing chain ties)
0465574c127907df9b764055a585e8281bae8d1d test: Fixes send_blocks_and_test docs (Sergi Delgado Segura) 09c95f21e71d196120e6c9d0b1d1923a4927408d test: Adds block tiebreak over restarts tests (Sergi Delgado Segura) 18524b072e6bdd590a9f6badd15d897b5ef5ce54 Make nSequenceId init value constants (Sergi Delgado Segura) 8b91883a23aac64a37d929eeae81325e221d177d Set the same best tip on restart if two candidates have the same work (Sergi Delgado Segura) 5370bed21e0b04feca6ec09738ecbe792095a338 test: add functional test for complex reorgs (Pieter Wuille) ab145cb3b471d07a2e8ee79edde46ec67f47d580 Updates CBlockIndexWorkComparator outdated comment (Sergi Delgado Segura) Pull request description: This PR grabs some interesting bits from https://github.com/bitcoin/bitcoin/pull/29284 and fixes some edge cases in how block tiebreaks are dealt with. ## Regarding #29284 The main functionality from the PR was dropped given it was not an issue anymore; however, reviewers pointed out some comments were outdated https://github.com/bitcoin/bitcoin/pull/29284#discussion_r1522023578 (which to my understanding may have led to thinking that there was still an issue). It also added test coverage for the aforementioned case, which was already passing on master and is useful to keep. ## New functionality While reviewing the superseded PR, it was noticed that blocks that are loaded from disk may face a similar issue (check https://github.com/bitcoin/bitcoin/pull/29284#issuecomment-1994317785 for more context). The issue comes from how tiebreaks for equal work blocks are handled: if two blocks have the same amount of work, the one that is activatable first wins, that is, the one for which we have all its data (and all of its ancestors'). The variable that keeps track of this, within `CBlockIndex`, is `nSequenceId`, which is not persisted over restarts. This means that when a node is restarted, all blocks loaded from disk are defaulted to the same `nSequenceId`: 0. 
Now, when trying to decide what chain is best on loading blocks from disk, the previous tiebreaker rule is not decisive anymore, so the `CBlockIndexWorkComparator` has to default to its last rule: whatever block is loaded first (has a smaller memory address). This means that if multiple same work tip candidates were available before restarting the node, it could be the case that the selected chain tip after restarting does not match the one before. Therefore, the way `nSequenceId` is initialized is changed to: - 0 for blocks that belong to the previously known best chain - 1 to all other blocks loaded from disk ACKs for top commit: sipa: utACK 0465574c127907df9b764055a585e8281bae8d1d TheCharlatan: ACK 0465574c127907df9b764055a585e8281bae8d1d furszy: Tested ACK 0465574c127907df9b764055a585e8281bae8d1d. Tree-SHA512: 161da814da03ce10c34d27d79a315460a9c98d019b85ee35bc5daa991ed3b6a2e69a829e421fc70d093a83cf7a2e403763041e594df39ed1991445e54c16532a
This commit is contained in:
commit
56e9703968
@ -35,6 +35,9 @@ static constexpr int64_t MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60;
|
||||
* MAX_FUTURE_BLOCK_TIME.
|
||||
*/
|
||||
static constexpr int64_t TIMESTAMP_WINDOW = MAX_FUTURE_BLOCK_TIME;
|
||||
//! Init values for CBlockIndex nSequenceId when loaded from disk
|
||||
static constexpr int32_t SEQ_ID_BEST_CHAIN_FROM_DISK = 0;
|
||||
static constexpr int32_t SEQ_ID_INIT_FROM_DISK = 1;
|
||||
|
||||
/**
|
||||
* Maximum gap between node time and block time used
|
||||
@ -191,7 +194,9 @@ public:
|
||||
uint32_t nNonce{0};
|
||||
|
||||
//! (memory only) Sequential id assigned to distinguish order in which blocks are received.
|
||||
int32_t nSequenceId{0};
|
||||
//! Initialized to SEQ_ID_INIT_FROM_DISK{1} when loading blocks from disk, except for blocks
|
||||
//! belonging to the best chain which overwrite it to SEQ_ID_BEST_CHAIN_FROM_DISK{0}.
|
||||
int32_t nSequenceId{SEQ_ID_INIT_FROM_DISK};
|
||||
|
||||
//! (memory only) Maximum nTime in the chain up to and including this block.
|
||||
unsigned int nTimeMax{0};
|
||||
|
||||
@ -161,12 +161,13 @@ bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIn
|
||||
if (pa->nChainWork > pb->nChainWork) return false;
|
||||
if (pa->nChainWork < pb->nChainWork) return true;
|
||||
|
||||
// ... then by earliest time received, ...
|
||||
// ... then by earliest activatable time, ...
|
||||
if (pa->nSequenceId < pb->nSequenceId) return false;
|
||||
if (pa->nSequenceId > pb->nSequenceId) return true;
|
||||
|
||||
// Use pointer address as tie breaker (should only happen with blocks
|
||||
// loaded from disk, as those all have id 0).
|
||||
// loaded from disk, as those share the same id: 0 for blocks on the
|
||||
// best chain, 1 for all others).
|
||||
if (pa < pb) return false;
|
||||
if (pa > pb) return true;
|
||||
|
||||
@ -217,7 +218,7 @@ CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockInde
|
||||
// We assign the sequence id to blocks only when the full data is available,
|
||||
// to avoid miners withholding blocks but broadcasting headers, to get a
|
||||
// competitive advantage.
|
||||
pindexNew->nSequenceId = 0;
|
||||
pindexNew->nSequenceId = SEQ_ID_INIT_FROM_DISK;
|
||||
|
||||
pindexNew->phashBlock = &((*mi).first);
|
||||
BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
|
||||
|
||||
@ -4660,7 +4660,7 @@ bool Chainstate::LoadChainTip()
|
||||
AssertLockHeld(cs_main);
|
||||
const CCoinsViewCache& coins_cache = CoinsTip();
|
||||
assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
|
||||
const CBlockIndex* tip = m_chain.Tip();
|
||||
CBlockIndex* tip = m_chain.Tip();
|
||||
|
||||
if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
|
||||
return true;
|
||||
@ -4672,6 +4672,20 @@ bool Chainstate::LoadChainTip()
|
||||
return false;
|
||||
}
|
||||
m_chain.SetTip(*pindex);
|
||||
tip = m_chain.Tip();
|
||||
|
||||
// Make sure our chain tip before shutting down scores better than any other candidate
|
||||
// to maintain a consistent best tip over reboots in case of a tie.
|
||||
auto target = tip;
|
||||
while (target) {
|
||||
target->nSequenceId = SEQ_ID_BEST_CHAIN_FROM_DISK;
|
||||
target = target->pprev;
|
||||
}
|
||||
|
||||
// Block index candidates are loaded before the chain tip, so we need to replace this entry
|
||||
// Otherwise the scoring will be based on the memory address location instead of the nSequenceId
|
||||
setBlockIndexCandidates.erase(tip);
|
||||
TryAddBlockIndexCandidate(tip);
|
||||
PruneBlockIndexCandidates();
|
||||
|
||||
tip = m_chain.Tip();
|
||||
@ -5323,7 +5337,9 @@ void ChainstateManager::CheckBlockIndex() const
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!pindex->HaveNumChainTxs()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
|
||||
// nSequenceId can't be set higher than SEQ_ID_INIT_FROM_DISK{1} for blocks that aren't linked
|
||||
// (negative is used for preciousblock, SEQ_ID_BEST_CHAIN_FROM_DISK{0} for active chain when loaded from disk)
|
||||
if (!pindex->HaveNumChainTxs()) assert(pindex->nSequenceId <= SEQ_ID_INIT_FROM_DISK);
|
||||
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
|
||||
// HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
|
||||
if (!m_blockman.m_have_pruned) {
|
||||
|
||||
@ -1052,8 +1052,10 @@ public:
|
||||
* Every received block is assigned a unique and increasing identifier, so we
|
||||
* know which one to give priority in case of a fork.
|
||||
*/
|
||||
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
|
||||
int32_t nBlockSequenceId GUARDED_BY(::cs_main) = 1;
|
||||
/** Blocks loaded from disk are assigned id SEQ_ID_INIT_FROM_DISK{1}
|
||||
* (SEQ_ID_BEST_CHAIN_FROM_DISK{0} if they belong to the best chain loaded from disk),
|
||||
* so start the counter after that. **/
|
||||
int32_t nBlockSequenceId GUARDED_BY(::cs_main) = SEQ_ID_INIT_FROM_DISK + 1;
|
||||
/** Decreasing counter (used by subsequent preciousblock calls). */
|
||||
int32_t nBlockReverseSequenceId = -1;
|
||||
/** chainwork for the last block that preciousblock has been applied to. */
|
||||
@ -1064,7 +1066,7 @@ public:
|
||||
void ResetBlockSequenceCounters() EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
|
||||
{
|
||||
AssertLockHeld(::cs_main);
|
||||
nBlockSequenceId = 1;
|
||||
nBlockSequenceId = SEQ_ID_INIT_FROM_DISK + 1;
|
||||
nBlockReverseSequenceId = -1;
|
||||
}
|
||||
|
||||
|
||||
151
test/functional/feature_chain_tiebreaks.py
Executable file
151
test/functional/feature_chain_tiebreaks.py
Executable file
@ -0,0 +1,151 @@
|
||||
#!/usr/bin/env python3
# Copyright (c) The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the correct active block is chosen in complex reorgs."""

from test_framework.blocktools import create_block
from test_framework.messages import CBlockHeader
from test_framework.p2p import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class ChainTiebreaksTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    @staticmethod
    def send_headers(node, blocks):
        """Submit headers for blocks to node."""
        for block in blocks:
            # Use RPC rather than P2P, to prevent the message from being interpreted as a block
            # announcement.
            node.submitheader(hexdata=CBlockHeader(block).serialize().hex())

    def test_chain_split_in_memory(self):
        """Exercise equal-work tiebreaks between tips that are all created in memory."""
        node = self.nodes[0]
        # Add P2P connection to bitcoind
        peer = node.add_p2p_connection(P2PDataStore())

        self.log.info('Precomputing blocks')
        #
        # Block tree (each child of B[i] is B[2i+1] and B[2i+2]):
        #
        #              /- B3 -- B7
        #        /- B1        \- B8
        #       /       \
        #      /         \- B4 -- B9
        #     B0                \- B10
        #      \
        #       \        /- B5
        #        \- B2
        #               \- B6
        #
        blocks = []

        # Construct B0, building off genesis.
        start_height = node.getblockcount()
        blocks.append(create_block(
            hashprev=int(node.getbestblockhash(), 16),
            tmpl={"height": start_height + 1}
        ))
        blocks[-1].solve()

        # Construct B1-B10.
        for i in range(1, 11):
            blocks.append(create_block(
                # Parent of B[i] is B[(i-1)>>1], giving the binary tree above.
                hashprev=blocks[(i - 1) >> 1].hash_int,
                tmpl={
                    "height": start_height + (i + 1).bit_length(),
                    # Make sure each block has a different hash.
                    "curtime": blocks[-1].nTime + 1,
                }
            ))
            blocks[-1].solve()

        self.log.info('Make sure B0 is accepted normally')
        peer.send_blocks_and_test([blocks[0]], node, success=True)
        # B0 must be active chain now.
        assert_equal(node.getbestblockhash(), blocks[0].hash_hex)

        self.log.info('Send B1 and B2 headers, and then blocks in opposite order')
        self.send_headers(node, blocks[1:3])
        peer.send_blocks_and_test([blocks[2]], node, success=True)
        peer.send_blocks_and_test([blocks[1]], node, success=False)
        # B2 must be active chain now, as full data for B2 was received first.
        assert_equal(node.getbestblockhash(), blocks[2].hash_hex)

        self.log.info('Send all further headers in order')
        self.send_headers(node, blocks[3:])
        # B2 is still the active chain, headers don't change this.
        assert_equal(node.getbestblockhash(), blocks[2].hash_hex)

        self.log.info('Send blocks B7-B10')
        peer.send_blocks_and_test([blocks[7]], node, success=False)
        peer.send_blocks_and_test([blocks[8]], node, success=False)
        peer.send_blocks_and_test([blocks[9]], node, success=False)
        peer.send_blocks_and_test([blocks[10]], node, success=False)
        # B2 is still the active chain, as B7-B10 have missing parents.
        assert_equal(node.getbestblockhash(), blocks[2].hash_hex)

        self.log.info('Send parents B3-B4 of B8-B10 in reverse order')
        peer.send_blocks_and_test([blocks[4]], node, success=False, force_send=True)
        peer.send_blocks_and_test([blocks[3]], node, success=False, force_send=True)
        # B9 is now active. Despite B7 being received earlier, the missing parent
        # of B9 (B4) arrived before the missing parent of B7 (B3), so B9 became
        # activatable first.
        assert_equal(node.getbestblockhash(), blocks[9].hash_hex)

        self.log.info('Invalidate B9-B10')
        node.invalidateblock(blocks[9].hash_hex)
        node.invalidateblock(blocks[10].hash_hex)
        # B7 is now active.
        assert_equal(node.getbestblockhash(), blocks[7].hash_hex)

        # Invalidate blocks to start fresh on the next test
        node.invalidateblock(blocks[0].hash_hex)

    def test_chain_split_from_disk(self):
        """Check that an equal-work tie is resolved the same way after the node
        restarts and reloads its block index from disk."""
        node = self.nodes[0]
        peer = node.add_p2p_connection(P2PDataStore())

        self.log.info('Precomputing blocks')
        #
        #    A1
        #   /
        #  G
        #   \
        #    A2
        #
        blocks = []

        # Construct two blocks building from genesis.
        start_height = node.getblockcount()
        genesis_block = node.getblock(node.getblockhash(start_height))
        prev_time = genesis_block["time"]

        for i in range(0, 2):
            blocks.append(create_block(
                hashprev=int(genesis_block["hash"], 16),
                tmpl={
                    "height": start_height + 1,
                    # Make sure each block has a different hash.
                    "curtime": prev_time + i + 1,
                }
            ))
            blocks[-1].solve()

        # Send blocks and test the last one is not connected
        self.log.info('Send A1 and A2. Make sure that only the former connects')
        peer.send_blocks_and_test([blocks[0]], node, success=True)
        peer.send_blocks_and_test([blocks[1]], node, success=False)

        self.log.info('Restart the node and check that the best tip before restarting matched the ones afterwards')
        # Restart and check enough times for this to eventually fail if the logic is broken
        for _ in range(10):
            self.restart_node(0)
            assert_equal(blocks[0].hash_hex, node.getbestblockhash())

    def run_test(self):
        self.test_chain_split_in_memory()
        self.test_chain_split_from_disk()


if __name__ == '__main__':
    ChainTiebreaksTest(__file__).main()
@ -866,8 +866,8 @@ class P2PDataStore(P2PInterface):
|
||||
- the on_getheaders handler will ensure that any getheaders are responded to
|
||||
- if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
|
||||
ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
|
||||
- if success is True: assert that the node's tip advances to the most recent block
|
||||
- if success is False: assert that the node's tip doesn't advance
|
||||
- if success is True: assert that the node's tip is the last block in blocks at the end of the operation.
|
||||
- if success is False: assert that the node's tip isn't the last block in blocks at the end of the operation
|
||||
- if reject_reason is set: assert that the correct reject message is logged"""
|
||||
|
||||
with p2p_lock:
|
||||
|
||||
@ -324,6 +324,7 @@ BASE_SCRIPTS = [
|
||||
'feature_includeconf.py',
|
||||
'feature_addrman.py',
|
||||
'feature_asmap.py',
|
||||
'feature_chain_tiebreaks.py',
|
||||
'feature_fastprune.py',
|
||||
'feature_framework_miniwallet.py',
|
||||
'mempool_unbroadcast.py',
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user