From 0552a15b0b11a12780bb55b1ddee6633719ee536 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:34:23 -0400 Subject: [PATCH 001/116] feat: add to_bitcoin_hash() method for BurnchainHeaderHash, which flips the endianness of the hash --- src/burnchains/bitcoin/bits.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/burnchains/bitcoin/bits.rs b/src/burnchains/bitcoin/bits.rs index 899b75e981..5b04e6ba5d 100644 --- a/src/burnchains/bitcoin/bits.rs +++ b/src/burnchains/bitcoin/bits.rs @@ -570,9 +570,19 @@ impl BurnchainHeaderHash { /// Instantiate a burnchain block hash from a Bitcoin block header pub fn from_bitcoin_hash(bitcoin_hash: &Sha256dHash) -> BurnchainHeaderHash { // NOTE: Sha256dhash is the same size as BurnchainHeaderHash, so this should never panic + // Bitcoin stores its hashes in big-endian form, but our codebase stores them in + // little-endian form (which is also how most libraries work). BurnchainHeaderHash::from_bytes_be(bitcoin_hash.as_bytes()).unwrap() } + pub fn to_bitcoin_hash(&self) -> Sha256dHash { + let mut bytes = self.0.to_vec(); + bytes.reverse(); + let mut buf = [0u8; 32]; + buf.copy_from_slice(&bytes[0..32]); + Sha256dHash(buf) + } + pub fn zero() -> BurnchainHeaderHash { BurnchainHeaderHash([0x00; 32]) } From 5e45a9465dcfeb910a10c97d33485ee3665b0b48 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:35:09 -0400 Subject: [PATCH 002/116] refactoring: move reward cycle and block height conversion methods to PoxConstants --- src/burnchains/mod.rs | 47 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 3 deletions(-) diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index ca1a290b63..ca3f0a2a96 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -54,6 +54,7 @@ use self::bitcoin::{ }; /// This module contains drivers and types for all burn chains we support. 
+pub mod affirmation; pub mod bitcoin; pub mod burnchain; pub mod db; @@ -402,6 +403,44 @@ impl PoxConstants { BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, ) } + + pub fn is_reward_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { + let effective_height = burn_height - first_block_height; + // first block of the new reward cycle + (effective_height % (self.reward_cycle_length as u64)) == 1 + } + + pub fn reward_cycle_to_block_height(&self, first_block_height: u64, reward_cycle: u64) -> u64 { + // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not + // mod 0. + first_block_height + reward_cycle * (self.reward_cycle_length as u64) + 1 + } + + pub fn block_height_to_reward_cycle( + &self, + first_block_height: u64, + block_height: u64, + ) -> Option { + if block_height < first_block_height { + return None; + } + Some((block_height - first_block_height) / (self.reward_cycle_length as u64)) + } + + pub fn is_in_prepare_phase(&self, first_block_height: u64, block_height: u64) -> bool { + if block_height <= first_block_height { + // not a reward cycle start if we're the first block after genesis. + false + } else { + let effective_height = block_height - first_block_height; + let reward_index = effective_height % (self.reward_cycle_length as u64); + + // NOTE: first block in reward cycle is mod 1, so mod 0 is the last block in the + // prepare phase. 
+ reward_index == 0 + || reward_index > ((self.reward_cycle_length - self.prepare_length) as u64) + } + } } /// Structure for encoding our view of the network @@ -569,6 +608,7 @@ pub mod test { use std::collections::HashMap; use address::*; + use burnchains::bitcoin::indexer::BitcoinIndexer; use burnchains::db::*; use burnchains::Burnchain; use burnchains::*; @@ -1079,8 +1119,6 @@ pub mod test { ); let block = BurnchainBlock::Bitcoin(mock_bitcoin_block); - // this is basically lifted verbatum from Burnchain::process_block_ops() - test_debug!( "Process block {} {}", block.block_height(), @@ -1098,6 +1136,8 @@ pub mod test { let blockstack_txs = self.txs.clone(); + let burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); + let new_snapshot = sortition_db_handle .process_block_txs( &parent_snapshot, @@ -1147,11 +1187,12 @@ pub mod test { ); let header = block.header(); + let indexer: BitcoinIndexer = burnchain.make_indexer().unwrap(); let mut burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); burnchain_db - .raw_store_burnchain_block(header.clone(), self.txs.clone()) + .raw_store_burnchain_block(burnchain, &indexer, header.clone(), self.txs.clone()) .unwrap(); coord.handle_new_burnchain_block().unwrap(); From a25f1d06603e727c877c6b81dc42bf56baf79198 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:35:52 -0400 Subject: [PATCH 003/116] fix: while testing, log the PoX bit vector and ancestor consensus hashes when calculating the next consensus hash --- src/chainstate/burn/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/chainstate/burn/mod.rs b/src/chainstate/burn/mod.rs index 8aa5437ba6..578fea8b67 100644 --- a/src/chainstate/burn/mod.rs +++ b/src/chainstate/burn/mod.rs @@ -282,6 +282,8 @@ impl ConsensusHash { let mut ch_bytes = [0u8; 20]; ch_bytes.copy_from_slice(r160.result().as_slice()); + + test_debug!("Consensus hash {} from burn hash {}, ops-hash {}, total_burn {}, 
pox-id {}, priors: {:?}", &ConsensusHash(ch_bytes.clone()), burn_header_hash, opshash, total_burn, pox_id, prev_consensus_hashes); ConsensusHash(ch_bytes) } @@ -469,6 +471,7 @@ mod tests { canonical_stacks_tip_height: 0, canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) }; let mut tx = SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); From 411ab277d3a3fab8a245a644289afeb4ca8b57ca Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:36:29 -0400 Subject: [PATCH 004/116] refactor: wrap PoxConstants methods for block height and reward cycle conversion, and require that indexer instances be owned by the burnchain synchronization methods. --- src/burnchains/burnchain.rs | 138 ++++++++++++++++++++---------------- 1 file changed, 76 insertions(+), 62 deletions(-) diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index 3b129f6ba0..963924512e 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -18,6 +18,7 @@ use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryFrom; use std::fs; +use std::marker::Send; use std::path::PathBuf; use std::sync::mpsc::sync_channel; use std::sync::{ @@ -31,13 +32,14 @@ use crate::types::chainstate::StacksAddress; use crate::types::proof::TrieHash; use address::public_keys_to_address_hash; use address::AddressHashMode; +use burnchains::affirmation::update_pox_affirmation_maps; use burnchains::bitcoin::address::address_type_to_version_byte; use burnchains::bitcoin::address::to_c32_version_byte; use burnchains::bitcoin::address::BitcoinAddress; use burnchains::bitcoin::address::BitcoinAddressType; use burnchains::bitcoin::BitcoinNetworkType; use burnchains::bitcoin::{BitcoinInputType, BitcoinTxInput, BitcoinTxOutput}; -use burnchains::db::BurnchainDB; +use burnchains::db::{BurnchainDB, BurnchainHeaderReader}; use 
burnchains::indexer::{ BurnBlockIPC, BurnHeaderIPC, BurnchainBlockDownloader, BurnchainBlockParser, BurnchainIndexer, }; @@ -493,42 +495,23 @@ impl Burnchain { } pub fn is_reward_cycle_start(&self, burn_height: u64) -> bool { - let effective_height = burn_height - self.first_block_height; - // first block of the new reward cycle - (effective_height % (self.pox_constants.reward_cycle_length as u64)) == 1 + self.pox_constants + .is_reward_cycle_start(self.first_block_height, burn_height) } pub fn reward_cycle_to_block_height(&self, reward_cycle: u64) -> u64 { - // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not - // mod 0. - self.first_block_height + reward_cycle * (self.pox_constants.reward_cycle_length as u64) + 1 + self.pox_constants + .reward_cycle_to_block_height(self.first_block_height, reward_cycle) } pub fn block_height_to_reward_cycle(&self, block_height: u64) -> Option { - if block_height < self.first_block_height { - return None; - } - Some( - (block_height - self.first_block_height) - / (self.pox_constants.reward_cycle_length as u64), - ) + self.pox_constants + .block_height_to_reward_cycle(self.first_block_height, block_height) } pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { - if block_height <= self.first_block_height { - // not a reward cycle start if we're the first block after genesis. - false - } else { - let effective_height = block_height - self.first_block_height; - let reward_index = effective_height % (self.pox_constants.reward_cycle_length as u64); - - // NOTE: first block in reward cycle is mod 1, so mod 0 is the last block in the - // prepare phase. 
- reward_index == 0 - || reward_index - > ((self.pox_constants.reward_cycle_length - self.pox_constants.prepare_length) - as u64) - } + self.pox_constants + .is_in_prepare_phase(self.first_block_height, block_height) } pub fn regtest(working_dir: &str) -> Burnchain { @@ -579,7 +562,9 @@ impl Burnchain { Ok(()) } - pub fn make_indexer(&self) -> Result { + pub fn make_indexer( + &self, + ) -> Result { Burnchain::setup_chainstate_dirs(&self.working_dir)?; let indexer: I = BurnchainIndexer::init( @@ -653,13 +638,7 @@ impl Burnchain { &epochs, readwrite, )?; - let burnchaindb = BurnchainDB::connect( - &burnchain_db_path, - self.first_block_height, - &first_block_header_hash, - first_block_header_timestamp, - readwrite, - )?; + let burnchaindb = BurnchainDB::connect(&burnchain_db_path, self, readwrite)?; Ok((sortitiondb, burnchaindb)) } @@ -866,9 +845,12 @@ impl Burnchain { } /// Top-level entry point to check and process a block. - pub fn process_block( + /// NOTE: you must call this in order by burnchain blocks in the burnchain -- i.e. process the + /// parent before any children. 
+ pub fn process_block( burnchain: &Burnchain, burnchain_db: &mut BurnchainDB, + indexer: &B, block: &BurnchainBlock, ) -> Result { debug!( @@ -877,7 +859,28 @@ impl Burnchain { &block.block_hash() ); - let _blockstack_txs = burnchain_db.store_new_burnchain_block(burnchain, &block)?; + burnchain_db.store_new_burnchain_block(burnchain, indexer, &block)?; + let block_height = block.block_height(); + + let this_reward_cycle = burnchain + .block_height_to_reward_cycle(block_height) + .unwrap_or(0); + + let prev_reward_cycle = burnchain + .block_height_to_reward_cycle(block_height.saturating_sub(1)) + .unwrap_or(0); + + if this_reward_cycle != prev_reward_cycle { + // at reward cycle boundary + debug!( + "Update PoX affirmation maps for reward cycle {} ({}) block {} cycle-length {}", + prev_reward_cycle, + this_reward_cycle, + block_height, + burnchain.pox_constants.reward_cycle_length + ); + update_pox_affirmation_maps(burnchain_db, indexer, prev_reward_cycle, burnchain)?; + } let header = block.header(); @@ -886,10 +889,11 @@ impl Burnchain { /// Hand off the block to the ChainsCoordinator _and_ process the sortition /// *only* to be used by legacy stacks node interfaces, like the Helium node - pub fn process_block_and_sortition_deprecated( + pub fn process_block_and_sortition_deprecated( db: &mut SortitionDB, burnchain_db: &mut BurnchainDB, burnchain: &Burnchain, + indexer: &B, block: &BurnchainBlock, ) -> Result<(BlockSnapshot, BurnchainStateTransition), burnchain_error> { debug!( @@ -899,10 +903,11 @@ impl Burnchain { ); let header = block.header(); - let blockstack_txs = burnchain_db.store_new_burnchain_block(burnchain, &block)?; + let blockstack_txs = burnchain_db.store_new_burnchain_block(burnchain, indexer, &block)?; let sortition_tip = SortitionDB::get_canonical_sortition_tip(db.conn())?; + // extract block-commit metadata db.evaluate_sortition(&header, blockstack_txs, burnchain, &sortition_tip, None) .map(|(snapshot, transition, _)| (snapshot, transition)) } 
@@ -947,15 +952,15 @@ impl Burnchain { /// Top-level burnchain sync. /// Returns new latest block height. - pub fn sync( + pub fn sync( &mut self, + indexer: I, comms: &CoordinatorChannels, target_block_height_opt: Option, max_blocks_opt: Option, ) -> Result { - let mut indexer: I = self.make_indexer()?; let chain_tip = self.sync_with_indexer( - &mut indexer, + indexer, comms.clone(), target_block_height_opt, max_blocks_opt, @@ -967,12 +972,14 @@ impl Burnchain { /// Deprecated top-level burnchain sync. /// Returns (snapshot of new burnchain tip, last state-transition processed if any) /// If this method returns Err(burnchain_error::TrySyncAgain), then call this method again. - pub fn sync_with_indexer_deprecated( + pub fn sync_with_indexer_deprecated< + I: BurnchainIndexer + BurnchainHeaderReader + 'static + Send, + >( &mut self, - indexer: &mut I, + mut indexer: I, ) -> Result<(BlockSnapshot, Option), burnchain_error> { - self.setup_chainstate(indexer)?; - let (mut sortdb, mut burnchain_db) = self.connect_db(indexer, true)?; + self.setup_chainstate(&mut indexer)?; + let (mut sortdb, mut burnchain_db) = self.connect_db(&indexer, true)?; let burn_chain_tip = burnchain_db.get_canonical_chain_tip().map_err(|e| { error!("Failed to query burn chain tip from burn DB: {}", e); e @@ -991,7 +998,7 @@ impl Burnchain { // handle reorgs let orig_header_height = indexer.get_headers_height()?; // 1-indexed - let sync_height = Burnchain::sync_reorg(indexer)?; + let sync_height = Burnchain::sync_reorg(&mut indexer)?; if sync_height + 1 < orig_header_height { // a reorg happened warn!( @@ -1034,6 +1041,7 @@ impl Burnchain { let mut downloader = indexer.downloader(); let mut parser = indexer.parser(); + let input_headers = indexer.read_headers(start_block + 1, end_block + 1)?; let burnchain_config = self.clone(); @@ -1105,6 +1113,7 @@ impl Burnchain { &mut sortdb, &mut burnchain_db, &burnchain_config, + &indexer, &burnchain_block, )?; last_processed = (tip, Some(transition)); @@ 
-1120,7 +1129,6 @@ impl Burnchain { }); // feed the pipeline! - let input_headers = indexer.read_headers(start_block + 1, end_block + 1)?; let mut downloader_result: Result<(), burnchain_error> = Ok(()); for i in 0..input_headers.len() { debug!( @@ -1179,17 +1187,17 @@ impl Burnchain { /// If this method returns Err(burnchain_error::TrySyncAgain), then call this method again. pub fn sync_with_indexer( &mut self, - indexer: &mut I, + mut indexer: I, coord_comm: CoordinatorChannels, target_block_height_opt: Option, max_blocks_opt: Option, should_keep_running: Option>, ) -> Result where - I: BurnchainIndexer + 'static, + I: BurnchainIndexer + BurnchainHeaderReader + 'static + Send, { - self.setup_chainstate(indexer)?; - let (_, mut burnchain_db) = self.connect_db(indexer, true)?; + self.setup_chainstate(&mut indexer)?; + let (_, mut burnchain_db) = self.connect_db(&mut indexer, true)?; let burn_chain_tip = burnchain_db.get_canonical_chain_tip().map_err(|e| { error!("Failed to query burn chain tip from burn DB: {}", e); e @@ -1199,7 +1207,7 @@ impl Burnchain { // handle reorgs let orig_header_height = indexer.get_headers_height()?; // 1-indexed - let sync_height = Burnchain::sync_reorg(indexer)?; + let sync_height = Burnchain::sync_reorg(&mut indexer)?; if sync_height + 1 < orig_header_height { // a reorg happened warn!( @@ -1252,8 +1260,8 @@ impl Burnchain { debug!("Nothing to do; already have blocks up to {}", end_block); let bhh = BurnchainHeaderHash::from_bitcoin_hash(&BitcoinSha256dHash(hdr.header_hash())); - return burnchain_db - .get_burnchain_block(&bhh) + + return BurnchainDB::get_burnchain_block(burnchain_db.conn(), &bhh) .map(|block_data| block_data.header); } } @@ -1279,6 +1287,7 @@ impl Burnchain { let mut parser = indexer.parser(); let myself = self.clone(); + let input_headers = indexer.read_headers(start_block + 1, end_block + 1)?; // TODO: don't re-process blocks. See if the block hash is already present in the burn db, // and if so, do nothing. 
@@ -1374,7 +1383,7 @@ impl Burnchain { let insert_start = get_epoch_time_ms(); last_processed = - Burnchain::process_block(&myself, &mut burnchain_db, &burnchain_block)?; + Burnchain::process_block(&myself, &mut burnchain_db, &indexer, &burnchain_block)?; if !coord_comm.announce_new_burn_block() { return Err(burnchain_error::CoordinatorClosed); } @@ -1391,7 +1400,6 @@ impl Burnchain { .unwrap(); // feed the pipeline! - let input_headers = indexer.read_headers(start_block + 1, end_block + 1)?; let mut downloader_result: Result<(), burnchain_error> = Ok(()); for i in 0..input_headers.len() { debug!( @@ -1459,6 +1467,7 @@ pub mod tests { use crate::types::chainstate::StacksAddress; use crate::types::proof::TrieHash; use address::AddressHashMode; + use burnchains::affirmation::*; use burnchains::bitcoin::address::*; use burnchains::bitcoin::keys::BitcoinPublicKey; use burnchains::bitcoin::*; @@ -1961,11 +1970,13 @@ pub mod tests { canonical_stacks_tip_height: 0, canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) }; - let block_ops_122 = vec![BlockstackOperationType::LeaderKeyRegister( - leader_key_2.clone(), - )]; + let block_ops_122: Vec = + vec![BlockstackOperationType::LeaderKeyRegister( + leader_key_2.clone(), + )]; let block_opshash_122 = OpsHash::from_txids(&vec![leader_key_2.txid.clone()]); let block_prev_chs_122 = vec![ block_121_snapshot.consensus_hash.clone(), @@ -2009,9 +2020,10 @@ pub mod tests { canonical_stacks_tip_height: 0, canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) }; - let block_ops_123 = vec![ + let block_ops_123: Vec = vec![ BlockstackOperationType::UserBurnSupport(user_burn_noblock.clone()), BlockstackOperationType::UserBurnSupport(user_burn_nokey.clone()), 
BlockstackOperationType::LeaderKeyRegister(leader_key_1.clone()), @@ -2063,6 +2075,7 @@ pub mod tests { canonical_stacks_tip_height: 0, canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) }; // multiple possibilities for block 124 -- we'll reorg the chain each time back to 123 and @@ -2261,6 +2274,7 @@ pub mod tests { canonical_stacks_tip_height: 0, canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) }; if next_sortition { From 0eb76b3f4d80207f12a5566d738238a0f1e40871 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:37:23 -0400 Subject: [PATCH 005/116] refactor: make write_block_headers() public --- src/burnchains/bitcoin/spv.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/burnchains/bitcoin/spv.rs b/src/burnchains/bitcoin/spv.rs index 508ea7a4a7..7b3dd87043 100644 --- a/src/burnchains/bitcoin/spv.rs +++ b/src/burnchains/bitcoin/spv.rs @@ -556,7 +556,7 @@ impl SpvClient { /// Write a run of continuous headers to a particular location. /// Does _not_ check for continuity! - fn write_block_headers( + pub fn write_block_headers( &mut self, height: u64, headers: Vec, From 7b6652cd6bbf7ebe24132f2d784e64c04bfa2b5c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:37:43 -0400 Subject: [PATCH 006/116] feat: implement BurnchainHeaderReader trait for the BitcoinIndexer, so it can be used when deducing the PoX anchor block from burnchain state. 
--- src/burnchains/bitcoin/indexer.rs | 65 +++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 3 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index 9b2c7085e1..7040c1bf38 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -31,21 +31,26 @@ use burnchains::bitcoin::blocks::BitcoinHeaderIPC; use burnchains::bitcoin::messages::BitcoinMessageHandler; use burnchains::bitcoin::spv::*; use burnchains::bitcoin::Error as btc_error; +use burnchains::db::BurnchainHeaderReader; use burnchains::indexer::BurnchainIndexer; use burnchains::indexer::*; use burnchains::Burnchain; +use util::db::Error as DBError; use burnchains::bitcoin::blocks::{BitcoinBlockDownloader, BitcoinBlockParser}; use burnchains::bitcoin::BitcoinNetworkType; use crate::types::chainstate::BurnchainHeaderHash; +use burnchains::BurnchainBlockHeader; use burnchains::Error as burnchain_error; use burnchains::MagicBytes; use burnchains::BLOCKSTACK_MAGIC_MAINNET; -use deps::bitcoin::blockdata::block::LoneBlockHeader; +use deps::bitcoin::blockdata::block::{BlockHeader, LoneBlockHeader}; +use deps::bitcoin::network::encodable::VarInt; use deps::bitcoin::network::message::NetworkMessage; use deps::bitcoin::network::serialize::BitcoinHash; +use deps::bitcoin::util::hash::Sha256dHash; use util::log; @@ -764,6 +769,31 @@ impl BitcoinIndexer { Ok(new_tip) } + + #[cfg(test)] + pub fn raw_store_header(&mut self, header: BurnchainBlockHeader) -> Result<(), btc_error> { + let mut spv_client = SpvClient::new( + &self.config.spv_headers_path, + self.config.first_block, + None, + self.runtime.network_id, + true, + false, + )?; + let hdr = LoneBlockHeader { + header: BlockHeader { + bits: 0, + merkle_root: Sha256dHash([0u8; 32]), + nonce: 0, + prev_blockhash: header.parent_block_hash.to_bitcoin_hash(), + time: header.timestamp as u32, + version: 0x20000000, + }, + tx_count: VarInt(header.num_txs), + }; + 
spv_client.write_block_headers(header.block_height, vec![hdr])?; + Ok(()) + } } impl Drop for BitcoinIndexer { @@ -814,7 +844,7 @@ impl BurnchainIndexer for BitcoinIndexer { .map_err(burnchain_error::Bitcoin)?; } - let mut indexer = BitcoinIndexer::from_file(bitcoin_network_id, &conf_path_str) + let indexer = BitcoinIndexer::from_file(bitcoin_network_id, &conf_path_str) .map_err(burnchain_error::Bitcoin)?; SpvClient::new( @@ -827,7 +857,7 @@ impl BurnchainIndexer for BitcoinIndexer { ) .map_err(burnchain_error::Bitcoin)?; - indexer.connect()?; + // indexer.connect()?; Ok(indexer) } @@ -1014,6 +1044,35 @@ impl BurnchainIndexer for BitcoinIndexer { } } +impl BurnchainHeaderReader for BitcoinIndexer { + fn read_burnchain_headers( + &self, + start_height: u64, + end_height: u64, + ) -> Result, DBError> { + let hdrs = self + .read_headers(start_height, end_height) + .map_err(|e| DBError::Other(format!("Burnchain error: {:?}", &e)))?; + + Ok(hdrs + .into_iter() + .map(|hdr| BurnchainBlockHeader { + block_height: hdr.block_height, + block_hash: BurnchainHeaderHash::from_bitcoin_hash(&Sha256dHash(hdr.header_hash())), + parent_block_hash: BurnchainHeaderHash::from_bitcoin_hash( + &hdr.block_header.header.prev_blockhash, + ), + num_txs: hdr.block_header.tx_count.0, + timestamp: hdr.block_header.header.time as u64, + }) + .collect()) + } + fn get_burnchain_headers_height(&self) -> Result { + self.get_headers_height() + .map_err(|e| DBError::Other(format!("Burnchain error: {:?}", &e))) + } +} + #[cfg(test)] mod test { use super::*; From 8701eb76f8a4f1a20153fe875e143fcc6d7de358 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:38:31 -0400 Subject: [PATCH 007/116] feat: introduce the AffirmationMap data structure, which identifies a history of network affirmations made on the status of prior PoX anchor blocks. 
Implement all of the logic required to scan a reward cycle from burnchain block commits, deduce which block-commit is the anchor block, and tag each block-commit with the affirmation map it represents. Also, add lots of unit tests for this! --- src/burnchains/affirmation.rs | 3315 +++++++++++++++++++++++++++++++++ 1 file changed, 3315 insertions(+) create mode 100644 src/burnchains/affirmation.rs diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs new file mode 100644 index 0000000000..5a69adfb68 --- /dev/null +++ b/src/burnchains/affirmation.rs @@ -0,0 +1,3315 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cmp; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::{TryFrom, TryInto}; +use std::fmt; +use std::sync::mpsc::SyncSender; +use std::time::Duration; + +use burnchains::{ + db::{BurnchainBlockData, BurnchainDB, BurnchainDBTransaction, BurnchainHeaderReader}, + Address, Burnchain, BurnchainBlockHeader, Error, PoxConstants, Txid, +}; +use chainstate::burn::{ + db::sortdb::SortitionDB, + operations::leader_block_commit::{RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS}, + operations::BlockstackOperationType, + operations::LeaderBlockCommitOp, + BlockSnapshot, ConsensusHash, +}; +use util::db::DBConn; +use util::db::Error as DBError; + +use core::StacksEpochId; + +use crate::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, StacksBlockHeader, + StacksBlockId, +}; +use crate::util::boot::boot_code_id; + +/// Affirmation map entries. By building on a PoX-mined block, +/// a PoB-mined block (in a PoX reward cycle), +/// or no block in reward cycle _i_, a sortition's miner +/// affirms something about the status of the ancestral anchor blocks. 
+#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub enum AffirmationMapEntry { + PoxAnchorBlockPresent, + PoxAnchorBlockAbsent, + Nothing, +} + +impl AffirmationMapEntry { + pub fn parse(s: &str) -> Option { + if s.len() != 1 { + return None; + } + + for chr in s.chars() { + let next = match chr { + 'p' => AffirmationMapEntry::PoxAnchorBlockPresent, + 'a' => AffirmationMapEntry::PoxAnchorBlockAbsent, + 'n' => AffirmationMapEntry::Nothing, + _ => { + return None; + } + }; + return Some(next); + } + return None; + } +} + +#[derive(Clone, PartialEq)] +pub struct AffirmationMap { + affirmations: Vec, +} + +impl fmt::Display for AffirmationMapEntry { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + AffirmationMapEntry::PoxAnchorBlockPresent => write!(f, "p"), + AffirmationMapEntry::PoxAnchorBlockAbsent => write!(f, "a"), + AffirmationMapEntry::Nothing => write!(f, "n"), + } + } +} + +impl fmt::Debug for AffirmationMapEntry { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&format!("{}", &self)) + } +} + +impl fmt::Display for AffirmationMap { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "AM(")?; + for entry in self.affirmations.iter() { + write!(f, "{}", &entry)?; + } + write!(f, ")") + } +} + +impl fmt::Debug for AffirmationMap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&format!("{}", &self)) + } +} + +impl AffirmationMap { + pub fn new(entries: Vec) -> AffirmationMap { + AffirmationMap { + affirmations: entries, + } + } + + pub fn empty() -> AffirmationMap { + AffirmationMap { + affirmations: vec![], + } + } + + pub fn at(&self, reward_cycle: u64) -> Option { + if reward_cycle >= self.affirmations.len() as u64 { + None + } else { + Some(self.affirmations[reward_cycle as usize]) + } + } + + pub fn push(&mut self, entry: AffirmationMapEntry) { + self.affirmations.push(entry) + } + + pub fn pop(&mut self) -> Option { + self.affirmations.pop() + } + + pub fn 
len(&self) -> usize { + self.affirmations.len() + } + + pub fn reward_cycle(&self) -> u64 { + self.affirmations.len() as u64 + } + + pub fn as_slice(&self) -> &[AffirmationMapEntry] { + &self.affirmations + } + + // used to store to database + pub fn encode(&self) -> String { + let mut ret = vec![]; + for entry in self.affirmations.iter() { + ret.push(format!("{}", entry)); + } + ret.join("") + } + + // used for database from-row + pub fn decode(s: &str) -> Option { + if !s.is_ascii() { + return None; + } + + let mut affirmations = vec![]; + for chr in s.chars() { + let next = match chr { + 'p' => AffirmationMapEntry::PoxAnchorBlockPresent, + 'a' => AffirmationMapEntry::PoxAnchorBlockAbsent, + 'n' => AffirmationMapEntry::Nothing, + _ => { + return None; + } + }; + affirmations.push(next); + } + Some(AffirmationMap { affirmations }) + } + + /// Has `other` diverged from `self`? + /// If `other` contains a reward cycle affirmation that is not present in `self`, then yes. + /// (Note that this means that if `other` is a prefix of `self`, then no divergence). + /// Return the index into `other` where the affirmation differs from `self`. + pub fn find_divergence(&self, other: &AffirmationMap) -> Option { + for i in 0..cmp::min(self.len(), other.len()) { + if self.affirmations[i] != other.affirmations[i] { + return Some(i as u64); + } + } + + if other.len() > self.len() { + return Some(self.len() as u64); + } + + None + } + + /// What is the PoX ID if this affirmation map? + /// This is a surjective mapping: `n` and `p` are 1, and `a` is 0 + pub fn as_pox_id(&self) -> PoxId { + let mut pox_id = PoxId::initial(); + + // affirmation maps are statements out prepare phases, not about the reward cycle's anchor + // block status. So, account for the first reward cycle, which has no anchor block. 
+ pox_id.extend_with_present_block(); + + for affirmation in self.affirmations.iter() { + match affirmation { + AffirmationMapEntry::PoxAnchorBlockAbsent => { + pox_id.extend_with_not_present_block(); + } + _ => { + pox_id.extend_with_present_block(); + } + } + } + pox_id + } + + /// What is the weight of this affirmation map? + /// i.e. how many times did the network either affirm an anchor block, or made no election? + pub fn weight(&self) -> u64 { + let mut weight = 0; + for i in 0..self.len() { + match self.affirmations[i] { + AffirmationMapEntry::PoxAnchorBlockAbsent => {} + _ => { + weight += 1; + } + } + } + weight + } +} + +/// Get a parent/child reward cycle. Only return Some(..) if the reward cycle is known for both -- +/// i.e. their block heights are plausible. +pub fn get_parent_child_reward_cycles( + parent: &LeaderBlockCommitOp, + block_commit: &LeaderBlockCommitOp, + burnchain: &Burnchain, +) -> Option<(u64, u64)> { + let child_reward_cycle = match burnchain.block_height_to_reward_cycle(block_commit.block_height) + { + Some(crc) => crc, + None => return None, + }; + + let parent_reward_cycle = match burnchain.block_height_to_reward_cycle(parent.block_height) { + Some(prc) => prc, + None => { + if parent.block_height == 0 && parent.vtxindex == 0 { + // this is a first block commit + 0 + } else { + return None; + } + } + }; + + test_debug!( + "{},{} is rc={},rc={}", + parent.block_height, + block_commit.block_height, + parent_reward_cycle, + child_reward_cycle + ); + Some((parent_reward_cycle, child_reward_cycle)) +} + +/// Read a range of blockstack operations for a prepare phase of a given reward cycle. 
+/// Only includes block-commits +pub fn read_prepare_phase_commits<'a, B: BurnchainHeaderReader>( + burnchain_tx: &BurnchainDBTransaction<'a>, + indexer: &B, + pox_consts: &PoxConstants, + first_block_height: u64, + reward_cycle: u64, +) -> Result>, Error> { + let start_height = pox_consts + .reward_cycle_to_block_height(first_block_height, reward_cycle + 1) + - (pox_consts.prepare_length as u64); + let end_height = start_height + (pox_consts.prepare_length as u64); + let headers = indexer.read_burnchain_headers(start_height, end_height)?; + let _num_headers = headers.len(); + + let mut ret = vec![]; + for header in headers.into_iter() { + let blk = BurnchainDB::get_burnchain_block(&burnchain_tx.conn(), &header.block_hash) + .expect(&format!( + "BUG: failed to load prepare-phase block {} ({})", + &header.block_hash, header.block_height + )); + + let mut block_ops = vec![]; + for op in blk.ops.into_iter() { + assert!(pox_consts.is_in_prepare_phase(first_block_height, op.block_height())); + match op { + BlockstackOperationType::LeaderBlockCommit(opdata) => { + // basic validity filtering + if opdata.block_height < first_block_height { + test_debug!("Skip too-early block commit"); + continue; + } + if (opdata.parent_block_ptr as u64) < first_block_height { + if opdata.parent_block_ptr != 0 || opdata.parent_vtxindex != 0 { + test_debug!("Skip orphaned block-commit"); + continue; + } + } + if opdata.block_height <= opdata.parent_block_ptr as u64 { + test_debug!("Skip block-commit whose 'parent' comes at or after it"); + continue; + } + if opdata.burn_fee == 0 { + test_debug!("Skip block-commit without burn"); + continue; + } + block_ops.push(opdata); + } + _ => { + continue; + } + } + } + block_ops.sort_by(|op1, op2| { + if op1.block_height != op2.block_height { + op1.block_height.cmp(&op2.block_height) + } else { + op1.vtxindex.cmp(&op2.vtxindex) + } + }); + ret.push(block_ops); + } + + test_debug!( + "Read {} headers, {} prepare-phase commits from reward cycle {} 
({}-{})", + _num_headers, + ret.len(), + reward_cycle, + start_height, + end_height + ); + Ok(ret) +} + +/// Find all referenced parent block-commits already in the burnchain DB, so we can extract their VRF seeds. +pub fn read_parent_block_commits<'a, B: BurnchainHeaderReader>( + burnchain_tx: &BurnchainDBTransaction<'a>, + indexer: &B, + prepare_phase_ops: &Vec>, +) -> Result, Error> { + let mut parents = HashMap::new(); + for ops in prepare_phase_ops.iter() { + for opdata in ops.iter() { + let mut hdrs = indexer.read_burnchain_headers( + opdata.parent_block_ptr as u64, + (opdata.parent_block_ptr + 1) as u64, + )?; + let hdr = match hdrs.len() { + 1 => hdrs.pop().expect("BUG: pop() failure on non-empty vector"), + _ => { + test_debug!( + "Orphan block commit {},{},{}: no such block {}", + &opdata.txid, + opdata.block_height, + opdata.vtxindex, + opdata.parent_block_ptr + ); + continue; + } + }; + + test_debug!("Get header at {}: {:?}", opdata.parent_block_ptr, &hdr); + assert_eq!(hdr.block_height, opdata.parent_block_ptr as u64); + + let mut found = false; + let blk = BurnchainDB::get_burnchain_block(burnchain_tx.conn(), &hdr.block_hash) + .expect(&format!( + "BUG: failed to load existing block {} ({})", + &hdr.block_hash, &hdr.block_height + )); + + for parent_op in blk.ops.into_iter() { + if let BlockstackOperationType::LeaderBlockCommit(parent_opdata) = parent_op { + if parent_opdata.vtxindex == opdata.parent_vtxindex as u32 { + test_debug!( + "Parent of {},{},{} is {},{},{}", + &opdata.txid, + opdata.block_height, + opdata.vtxindex, + &parent_opdata.txid, + parent_opdata.block_height, + parent_opdata.vtxindex + ); + parents.insert(parent_opdata.txid.clone(), parent_opdata); + found = true; + } + } + } + if !found { + test_debug!( + "Orphan block commit {},{},{}", + &opdata.txid, + opdata.block_height, + opdata.vtxindex + ); + } + } + } + let mut parent_list: Vec<_> = parents.into_iter().map(|(_, cmt)| cmt).collect(); + parent_list.sort_by(|a, b| { + if 
a.block_height != b.block_height { + a.block_height.cmp(&b.block_height) + } else { + a.vtxindex.cmp(&b.vtxindex) + } + }); + + test_debug!("Read {} parent block-commits", parent_list.len()); + Ok(parent_list) +} + +/// Given a list of prepare-phase block-commits, and a list of parent commits, filter out and remove +/// the prepare-phase commits that _don't_ have a parent. +pub fn filter_orphan_block_commits( + parents: &Vec, + prepare_phase_ops: Vec>, +) -> Vec> { + let mut parent_set = HashSet::new(); + for parent in parents.iter() { + parent_set.insert((parent.block_height, parent.vtxindex)); + } + for prepare_phase_block in prepare_phase_ops.iter() { + for opdata in prepare_phase_block.iter() { + parent_set.insert((opdata.block_height, opdata.vtxindex)); + } + } + + prepare_phase_ops + .into_iter() + .map(|prepare_phase_block| { + prepare_phase_block + .into_iter() + .filter(|opdata| { + if parent_set.contains(&( + opdata.parent_block_ptr as u64, + opdata.parent_vtxindex as u32, + )) { + true + } else { + test_debug!( + "Ignore invalid block-commit {},{} ({}): no parent {},{}", + opdata.block_height, + opdata.vtxindex, + &opdata.txid, + opdata.parent_block_ptr, + opdata.parent_vtxindex + ); + false + } + }) + .collect() + }) + .collect() +} + +/// Given a list of prepare-phase block-commits, filter out the ones that don't have correct burn +/// modulii. 
+pub fn filter_missed_block_commits(
+    prepare_phase_ops: Vec<Vec<LeaderBlockCommitOp>>,
+) -> Vec<Vec<LeaderBlockCommitOp>> {
+    prepare_phase_ops
+        .into_iter()
+        .map(|commits| {
+            commits
+                .into_iter()
+                .filter(|cmt| {
+                    // a commit is on-time iff it landed at a height congruent to
+                    // burn_block_mined_at() + 1, modulo BURN_BLOCK_MINED_AT_MODULUS
+                    let intended_modulus =
+                        (cmt.burn_block_mined_at() + 1) % BURN_BLOCK_MINED_AT_MODULUS;
+                    let actual_modulus = cmt.block_height % BURN_BLOCK_MINED_AT_MODULUS;
+                    if actual_modulus == intended_modulus {
+                        true
+                    } else {
+                        test_debug!(
+                            "Ignore invalid block-commit {},{} ({}): {} != {}",
+                            cmt.block_height,
+                            cmt.vtxindex,
+                            &cmt.txid,
+                            actual_modulus,
+                            intended_modulus
+                        );
+                        false
+                    }
+                })
+                .collect()
+        })
+        .collect()
+}
+
+/// Given a list of block-commits in the prepare-phase, find the block-commit outside the
+/// prepare-phase which must be the anchor block, if it exists at all. This is always
+/// the block-commit that has the most cumulative BTC committed behind it (and the highest
+/// such in the event of a tie), as well as at least `anchor_threshold` confirmations. If the anchor block
+/// commit is found, return the descendancy matrix for it as well.
+pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( + burnchain_tx: &BurnchainDBTransaction<'a>, + indexer: &B, + prepare_ops: &Vec>, + anchor_threshold: u32, +) -> Result>)>, DBError> { + // sanity check -- must be in order by block height and vtxindex + for prepare_block_ops in prepare_ops.iter() { + let mut expected_block_height = None; + let mut last_vtxindex = None; + for opdata in prepare_block_ops.iter() { + if let Some(expected_block_height) = expected_block_height.as_ref() { + assert_eq!(expected_block_height, &opdata.block_height); + } else { + expected_block_height = Some(opdata.block_height); + } + + if let Some(last_vtxindex) = last_vtxindex.as_mut() { + assert!(*last_vtxindex < opdata.vtxindex); + *last_vtxindex = opdata.vtxindex; + } else { + last_vtxindex = Some(opdata.vtxindex); + } + test_debug!( + "Prepare-phase block-commit {},{}: {}", + opdata.block_height, + opdata.vtxindex, + &opdata.txid + ); + } + } + + // map (block_height, vtxindex) to (burnt, parent_block_height, parent_vtxindex) + let mut parents = BTreeMap::new(); + + // map (block_height, vtxindex) to (non-prepare-ancestor-height, non-prepare-ancestor-vtxindex, total_burnt) + let mut ancestors = BTreeMap::new(); + + // map (non-prepare-ancestor-height, non-prepare-ancestor-vtxindex) to (set-of-block-heights, total_burnt) + // that contain descendants + let mut ancestor_confirmations: BTreeMap<(u64, u32), (HashSet, u64)> = BTreeMap::new(); + + // calculate each block-commit's parents + for prepare_block_ops in prepare_ops.iter() { + for opdata in prepare_block_ops.iter() { + parents.insert( + (opdata.block_height, opdata.vtxindex), + ( + opdata.burn_fee, + opdata.parent_block_ptr as u64, + opdata.parent_vtxindex as u32, + ), + ); + } + } + + // calculate the ancestor map -- find the highest ancestor for each prepare-phase block-commit + // that is _not_ in the prepare phase. 
+ for prepare_block_ops in prepare_ops.iter().rev() { + for opdata in prepare_block_ops.iter() { + let mut cursor = (opdata.block_height, opdata.vtxindex); + let mut total_burnt = 0; + while !ancestors.contains_key(&cursor) { + if let Some((burnt, parent_block, parent_vtxindex)) = parents.get(&cursor) { + cursor = (*parent_block, *parent_vtxindex); + total_burnt += *burnt; + } else { + break; + } + } + if !ancestors.contains_key(&cursor) { + ancestors.insert( + (opdata.block_height, opdata.vtxindex), + (cursor.0, cursor.1, total_burnt), + ); + } + } + } + + // calculate the ancestor confirmations -- figure out how many distinct blocks contain + // block-commits that descend from each pre-prepare-phase ancestor + for prepare_block_ops in prepare_ops.iter() { + for opdata in prepare_block_ops.iter() { + if let Some((ancestor_height, ancestor_vtxindex, total_burnt)) = + ancestors.get(&(opdata.block_height, opdata.vtxindex)) + { + if let Some((ref mut confirmed_block_set, ref mut ancestor_burnt)) = + ancestor_confirmations.get_mut(&(*ancestor_height, *ancestor_vtxindex)) + { + confirmed_block_set.insert(opdata.block_height); + *ancestor_burnt = cmp::max(*total_burnt, *ancestor_burnt); + } else { + let mut block_set = HashSet::new(); + block_set.insert(opdata.block_height); + ancestor_confirmations.insert( + (*ancestor_height, *ancestor_vtxindex), + (block_set, *total_burnt), + ); + } + } + } + } + + test_debug!("parents = {:?}", &parents); + test_debug!("ancestors = {:?}", &ancestors); + test_debug!("ancestor_confirmations = {:?}", &ancestor_confirmations); + + if ancestor_confirmations.len() == 0 { + // empty prepare phase + test_debug!("Prepare-phase has no block-commits"); + return Ok(None); + } + + // find the ancestors with at least $anchor_threshold confirmations, and pick the one that has the + // most total BTC. Break ties by ancestor order -- highest ancestor commit wins. 
+ let mut ancestor_block = 0; + let mut ancestor_vtxindex = 0; + let mut most_burnt = 0; + let mut most_confs = 0; + + // consider ancestor candidates in _highest_-first order + for ((height, vtxindex), (block_set, burnt)) in ancestor_confirmations.iter().rev() { + let confs = block_set.len() as u64; + if confs < anchor_threshold.into() { + continue; + } + if *burnt > most_burnt { + most_burnt = *burnt; + most_confs = confs; + ancestor_block = *height; + ancestor_vtxindex = *vtxindex; + } + } + + if most_burnt == 0 { + // no anchor block possible -- no block-commit has enough confirmations + test_debug!("No block-commit has enough support to be an anchor block"); + return Ok(None); + } + + // find the ancestor that this tip confirms + let heaviest_ancestor_header = indexer + .read_burnchain_headers(ancestor_block, ancestor_block + 1)? + .first() + .expect(&format!( + "BUG: no block headers for height {}", + ancestor_block + )) + .to_owned(); + + let heaviest_ancestor_block = + BurnchainDB::get_burnchain_block(burnchain_tx.conn(), &heaviest_ancestor_header.block_hash) + .expect(&format!( + "BUG: no ancestor block {:?} ({})", + &heaviest_ancestor_header.block_hash, heaviest_ancestor_header.block_height + )); + + // find the PoX anchor block-commit, if it exists at all + // (note that it may not -- a rich attacker can force F*w confirmations with lots of BTC on a + // commit that was never mined). 
+ for block_op in heaviest_ancestor_block.ops.into_iter() { + if let BlockstackOperationType::LeaderBlockCommit(opdata) = block_op { + if opdata.block_height == ancestor_block && opdata.vtxindex == ancestor_vtxindex { + // found + debug!( + "PoX anchor block-commit {},{},{} has {} burnt, {} confs", + &opdata.txid, opdata.block_height, opdata.vtxindex, most_burnt, most_confs + ); + + let mut descendancy = Vec::with_capacity(prepare_ops.len()); + for prepare_block_ops in prepare_ops.iter() { + let mut block_descendancy = Vec::with_capacity(prepare_ops.len()); + for opdata in prepare_block_ops.iter() { + if let Some((op_ancestor_height, op_ancestor_vtxindex, ..)) = + ancestors.get(&(opdata.block_height, opdata.vtxindex)) + { + if *op_ancestor_height == ancestor_block + && *op_ancestor_vtxindex == ancestor_vtxindex + { + test_debug!("Block-commit {},{} descends from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, op_ancestor_height, op_ancestor_vtxindex); + block_descendancy.push(true); + } else { + test_debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); + block_descendancy.push(false); + } + } else { + test_debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); + block_descendancy.push(false); + } + } + descendancy.push(block_descendancy); + } + + return Ok(Some((opdata, descendancy))); + } + } + } + + warn!("Evil miners confirmed a non-existant PoX anchor block!"); + Ok(None) +} + +/// Find the PoX anchor block selected in a reward cycle, if it exists. This is the heaviest F*w-confirmed +/// block-commit before the prepare-phase of this reward cycle, provided that it is not already an +/// anchor block for some other reward cycle. Note that the anchor block found will be the anchor +/// block for the *next* reward cycle. 
+/// Returns: +/// (a) the list of block-commits, grouped by block and ordered by vtxindex, in this prepare phase +/// (b) the PoX anchor block-commit, if it exists, and +/// (c) the descendancy data for the prepare phase. Descendency[i][j] is true if the jth +/// block-commit in the ith block in the prepare phase descends from the anchor block, or False +/// if not. +pub fn find_pox_anchor_block<'a, B: BurnchainHeaderReader>( + burnchain_tx: &BurnchainDBTransaction<'a>, + reward_cycle: u64, + indexer: &B, + burnchain: &Burnchain, +) -> Result< + ( + Vec>, + Option<(LeaderBlockCommitOp, Vec>)>, + ), + Error, +> { + let pox_consts = &burnchain.pox_constants; + let first_block_height = burnchain.first_block_height; + + let prepare_ops = read_prepare_phase_commits( + burnchain_tx, + indexer, + pox_consts, + first_block_height, + reward_cycle, + )?; + test_debug!("{} prepare-phase commits", prepare_ops.len()); + + let parent_commits = read_parent_block_commits(burnchain_tx, indexer, &prepare_ops)?; + test_debug!("{} parent block-commits", parent_commits.len()); + + let prepare_ops_no_orphans = filter_orphan_block_commits(&parent_commits, prepare_ops); + test_debug!( + "{} prepare-phase block-commits that have parents", + prepare_ops_no_orphans.len() + ); + + let prepare_ops_valid = filter_missed_block_commits(prepare_ops_no_orphans); + test_debug!( + "{} prepare-phase block-commits that have parents and are on-time", + prepare_ops_valid.len() + ); + + let anchor_block_and_descendancy_opt = find_heaviest_block_commit( + &burnchain_tx, + indexer, + &prepare_ops_valid, + burnchain.pox_constants.anchor_threshold, + )?; + if let Some((ref anchor_block_commit, _)) = anchor_block_and_descendancy_opt.as_ref() { + // cannot have been an anchor block in some other reward cycle + let md = BurnchainDB::get_commit_metadata( + burnchain_tx.conn(), + &anchor_block_commit.burn_header_hash, + &anchor_block_commit.txid, + )? 
+ .expect("BUG: anchor block commit has not metadata"); + + if let Some(rc) = md.anchor_block { + warn!( + "Block-commit {} is already an anchor block for reward cycle {}", + &anchor_block_commit.txid, rc + ); + return Ok((prepare_ops_valid, None)); + } + } + + if anchor_block_and_descendancy_opt.is_some() { + test_debug!( + "Selected an anchor block in prepare phase of reward cycle {}", + reward_cycle + ); + } else { + test_debug!( + "Did NOT select an anchor block in prepare phase of reward cycle {}", + reward_cycle + ); + } + + Ok((prepare_ops_valid, anchor_block_and_descendancy_opt)) +} + +/// Update a completed reward cycle's affirmation maps +pub fn update_pox_affirmation_maps( + burnchain_db: &mut BurnchainDB, + indexer: &B, + reward_cycle: u64, + burnchain: &Burnchain, +) -> Result<(), Error> { + debug!("Process PoX affirmations for reward cycle {}", reward_cycle); + + let tx = burnchain_db.tx_begin()?; + + let (prepare_ops, pox_anchor_block_info_opt) = + find_pox_anchor_block(&tx, reward_cycle, indexer, burnchain)?; + + if let Some((anchor_block, descendancy)) = pox_anchor_block_info_opt.clone() { + debug!( + "PoX anchor block elected in reward cycle {} for reward cycle {} is {}", + reward_cycle, + reward_cycle + 1, + &anchor_block.block_header_hash + ); + + // anchor block found for this upcoming reward cycle + tx.set_anchor_block(&anchor_block, reward_cycle + 1)?; + assert_eq!(descendancy.len(), prepare_ops.len()); + + // mark the prepare-phase commits that elected this next reward cycle's anchor block as + // having descended or not descended from this anchor block. 
+ for (block_idx, block_ops) in prepare_ops.iter().enumerate() { + assert_eq!(block_ops.len(), descendancy[block_idx].len()); + + for (tx_idx, tx_op) in block_ops.iter().enumerate() { + test_debug!( + "Make affirmation map for block-commit at {},{}", + tx_op.block_height, + tx_op.vtxindex + ); + tx.make_prepare_phase_affirmation_map( + indexer, + burnchain, + reward_cycle + 1, + tx_op, + Some(&anchor_block), + descendancy[block_idx][tx_idx], + )?; + } + } + } else { + debug!("PoX anchor block selected in reward cycle {} is None. Reward cycle {} has no anchor block", reward_cycle, reward_cycle + 1); + + // anchor block not found for this upcoming reward cycle + tx.clear_anchor_block(reward_cycle + 1)?; + + // mark the prepare-phase commits that did NOT elect this next reward cycle's anchor + // block as NOT having descended from any anchor block (since one was not chosen) + for block_ops in prepare_ops.iter() { + for tx_op in block_ops.iter() { + test_debug!( + "Make affirmation map for block-commit at {},{}", + tx_op.block_height, + tx_op.vtxindex + ); + tx.make_prepare_phase_affirmation_map( + indexer, + burnchain, + reward_cycle + 1, + tx_op, + None, + false, + )?; + } + } + } + + tx.commit()?; + debug!( + "Processed PoX affirmations for reward cycle {}", + reward_cycle + ); + + Ok(()) +} + +#[cfg(test)] +mod test { + use super::*; + use std::cmp; + use std::collections::HashSet; + use std::collections::VecDeque; + use std::sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + mpsc::sync_channel, + Arc, RwLock, + }; + + use rusqlite::Connection; + + use address; + use burnchains::bitcoin::address::BitcoinAddress; + use burnchains::bitcoin::indexer::BitcoinIndexer; + use burnchains::bitcoin::BitcoinNetworkType; + use burnchains::db::tests::*; + use burnchains::{db::*, *}; + use chainstate; + use chainstate::burn::db::sortdb::SortitionDB; + use chainstate::burn::operations::leader_block_commit::*; + use chainstate::burn::operations::*; + use chainstate::burn::*; + 
use chainstate::coordinator::{Error as CoordError, *}; + use chainstate::stacks::*; + use clarity_vm::clarity::ClarityConnection; + use core; + use core::*; + use monitoring::increment_stx_blocks_processed_counter; + use util::hash::{hex_bytes, Hash160}; + use util::vrf::*; + use vm::{ + costs::{ExecutionCost, LimitedCostTracker}, + types::PrincipalData, + types::QualifiedContractIdentifier, + Value, + }; + + use crate::types::chainstate::StacksBlockId; + use crate::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, VRFSeed, + }; + use crate::types::proof::TrieHash; + use crate::{types, util}; + + use chainstate::coordinator::tests::*; + + #[test] + fn affirmation_map_encode_decode() { + assert_eq!(AffirmationMap::decode(""), Some(AffirmationMap::empty())); + assert_eq!( + AffirmationMap::decode("anp"), + Some(AffirmationMap { + affirmations: vec![ + AffirmationMapEntry::PoxAnchorBlockAbsent, + AffirmationMapEntry::Nothing, + AffirmationMapEntry::PoxAnchorBlockPresent + ] + }) + ); + assert_eq!(AffirmationMap::decode("x"), None); + + assert_eq!(AffirmationMap::empty().encode(), "".to_string()); + assert_eq!( + AffirmationMap { + affirmations: vec![ + AffirmationMapEntry::PoxAnchorBlockAbsent, + AffirmationMapEntry::Nothing, + AffirmationMapEntry::PoxAnchorBlockPresent + ] + } + .encode(), + "anp".to_string() + ); + } + + #[test] + fn affirmation_map_find_divergence() { + assert_eq!( + AffirmationMap::decode("aaa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aaa").unwrap()), + None + ); + assert_eq!( + AffirmationMap::decode("aaa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aaaa").unwrap()), + Some(3) + ); + assert_eq!( + AffirmationMap::decode("aaa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aa").unwrap()), + None + ); + assert_eq!( + AffirmationMap::decode("apa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aaa").unwrap()), + Some(1) + ); + assert_eq!( + 
AffirmationMap::decode("apa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aaaa").unwrap()), + Some(1) + ); + assert_eq!( + AffirmationMap::decode("naa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aa").unwrap()), + Some(0) + ); + assert_eq!( + AffirmationMap::decode("napn") + .unwrap() + .find_divergence(&AffirmationMap::decode("").unwrap()), + None + ); + assert_eq!( + AffirmationMap::decode("pn") + .unwrap() + .find_divergence(&AffirmationMap::decode("n").unwrap()), + Some(0) + ); + } + + fn make_simple_key_register( + burn_header_hash: &BurnchainHeaderHash, + block_height: u64, + vtxindex: u32, + ) -> LeaderKeyRegisterOp { + LeaderKeyRegisterOp { + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a") + .unwrap(), + ) + .unwrap(), + memo: vec![01, 02, 03, 04, 05], + address: StacksAddress::from_bitcoin_address( + &BitcoinAddress::from_scriptpubkey( + BitcoinNetworkType::Testnet, + &hex_bytes("76a9140be3e286a15ea85882761618e366586b5574100d88ac").unwrap(), + ) + .unwrap(), + ), + + txid: next_txid(), + vtxindex: vtxindex, + block_height: block_height, + burn_header_hash: burn_header_hash.clone(), + } + } + + pub fn make_reward_cycle_with_vote( + burnchain_db: &mut BurnchainDB, + burnchain: &Burnchain, + key: &LeaderKeyRegisterOp, + headers: &mut Vec, + mut parent_commits: Vec>, + confirm_anchor_block: bool, + ) -> ( + Vec, + Vec>>, + ) { + let mut new_headers = vec![]; + let mut new_commits = vec![]; + + let first_block_header = burnchain_db.get_first_header().unwrap(); + let mut current_header = burnchain_db.get_canonical_chain_tip().unwrap(); + let mut height = current_header.block_height + 1; + let mut parent_block_header: Option = + Some(headers.last().unwrap().to_owned()); + + for i in 0..burnchain.pox_constants.reward_cycle_length { + let 
block_header = BurnchainBlockHeader { + block_height: height, + block_hash: next_burn_header_hash(), + parent_block_hash: parent_block_header + .as_ref() + .map(|blk| blk.block_hash.clone()) + .unwrap_or(first_block_header.block_hash.clone()), + num_txs: parent_commits.len() as u64, + timestamp: i as u64, + }; + + let ops = if current_header == first_block_header { + // first-ever block -- add only the leader key + let mut key_insert = key.clone(); + key_insert.burn_header_hash = block_header.block_hash.clone(); + + test_debug!( + "Insert key-register in {}: {},{},{} in block {}", + &key_insert.burn_header_hash, + &key_insert.txid, + key_insert.block_height, + key_insert.vtxindex, + block_header.block_height + ); + + new_commits.push(vec![None; parent_commits.len()]); + vec![BlockstackOperationType::LeaderKeyRegister( + key_insert.clone(), + )] + } else { + let mut commits = vec![]; + for i in 0..parent_commits.len() { + let mut block_commit = make_simple_block_commit( + &burnchain, + parent_commits[i].as_ref(), + &block_header, + next_block_hash(), + ); + block_commit.key_block_ptr = key.block_height as u32; + block_commit.key_vtxindex = key.vtxindex as u16; + block_commit.vtxindex += i as u32; + block_commit.burn_parent_modulus = if height > 0 { + ((height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8 + } else { + BURN_BLOCK_MINED_AT_MODULUS as u8 - 1 + }; + + assert_eq!(block_commit.burn_header_hash, block_header.block_hash); + assert_eq!(block_commit.block_height, block_header.block_height); + + let append = if !burnchain.is_in_prepare_phase(block_commit.block_height) { + // non-prepare-phase commits always confirm their parent + true + } else { + if confirm_anchor_block { + // all block-commits confirm anchor block + true + } else { + // fewer than anchor_threshold commits confirm anchor block + let next_rc_start = burnchain.reward_cycle_to_block_height( + burnchain + .block_height_to_reward_cycle(block_commit.block_height) + .unwrap() + + 1, + ); + if 
block_commit.block_height + + (burnchain.pox_constants.anchor_threshold as u64) + + 1 + < next_rc_start + { + // in first half of prepare phase, so confirm + true + } else { + // in second half of prepare phase, so don't confirm + false + } + } + }; + + if append { + test_debug!( + "Insert block-commit in {}: {},{},{}, builds on {},{}", + &block_commit.burn_header_hash, + &block_commit.txid, + block_commit.block_height, + block_commit.vtxindex, + block_commit.parent_block_ptr, + block_commit.parent_vtxindex + ); + + if let Some(ref parent_commit) = parent_commits[i].as_ref() { + assert!( + parent_commit.block_height as u64 + != block_commit.block_height as u64 + ); + assert!( + parent_commit.block_height as u64 + == block_commit.parent_block_ptr as u64 + ); + assert!( + parent_commit.vtxindex as u64 + == block_commit.parent_vtxindex as u64 + ); + } + + parent_commits[i] = Some(block_commit.clone()); + commits.push(Some(block_commit.clone())); + } else { + test_debug!( + "Do NOT insert block-commit in {}: {},{},{}", + &block_commit.burn_header_hash, + &block_commit.txid, + block_commit.block_height, + block_commit.vtxindex + ); + + commits.push(None); + } + } + new_commits.push(commits.clone()); + commits + .into_iter() + .filter_map(|cmt| cmt) + .map(|cmt| BlockstackOperationType::LeaderBlockCommit(cmt)) + .collect() + }; + + burnchain_db + .store_new_burnchain_block_ops_unchecked(burnchain, headers, &block_header, &ops) + .unwrap(); + + headers.push(block_header.clone()); + new_headers.push(block_header.clone()); + parent_block_header = Some(block_header); + + current_header = burnchain_db.get_canonical_chain_tip().unwrap(); + height = current_header.block_height + 1; + } + + (new_headers, new_commits) + } + + fn make_simple_reward_cycle( + burnchain_db: &mut BurnchainDB, + burnchain: &Burnchain, + key: &LeaderKeyRegisterOp, + headers: &mut Vec, + parent_commit: Option, + ) -> (Vec, Vec>) { + let (new_headers, commits) = + make_reward_cycle(burnchain_db, 
burnchain, key, headers, vec![parent_commit]); + ( + new_headers, + commits + .into_iter() + .map(|mut cmts| cmts.pop().unwrap()) + .collect(), + ) + } + + pub fn make_reward_cycle( + burnchain_db: &mut BurnchainDB, + burnchain: &Burnchain, + key: &LeaderKeyRegisterOp, + headers: &mut Vec, + parent_commits: Vec>, + ) -> ( + Vec, + Vec>>, + ) { + make_reward_cycle_with_vote(burnchain_db, burnchain, key, headers, parent_commits, true) + } + + pub fn make_reward_cycle_without_anchor( + burnchain_db: &mut BurnchainDB, + burnchain: &Burnchain, + key: &LeaderKeyRegisterOp, + headers: &mut Vec, + parent_commits: Vec>, + ) -> ( + Vec, + Vec>>, + ) { + make_reward_cycle_with_vote(burnchain_db, burnchain, key, headers, parent_commits, false) + } + + #[test] + fn test_read_prepare_phase_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + assert_eq!(&first_block_header.block_hash, &first_bhh); + assert_eq!(first_block_header.block_height, first_height); + assert_eq!(first_block_header.timestamp, first_timestamp as u64); + /* + assert_eq!( + &first_block_header.parent_block_hash, + &BurnchainHeaderHash::sentinel() + ); + */ + eprintln!( + "First block parent is {}", + &first_block_header.parent_block_hash + ); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + 
); + + assert_eq!( + commits.len() as u32, + burnchain.pox_constants.reward_cycle_length + ); + assert!(commits[0].is_none()); + for i in 1..burnchain.pox_constants.reward_cycle_length { + assert!(commits[i as usize].is_some()); + } + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + assert_eq!(all_ops.len() as u32, burnchain.pox_constants.prepare_length); + for i in 0..burnchain.pox_constants.prepare_length { + assert_eq!(all_ops[i as usize].len(), 1); + + let opdata = &all_ops[i as usize][0]; + assert_eq!( + opdata, + commits[(i + burnchain.pox_constants.reward_cycle_length + - burnchain.pox_constants.prepare_length) as usize] + .as_ref() + .unwrap() + ); + } + } + + #[test] + fn test_parent_block_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + 
read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) + .unwrap(); + + // this is a simple reward cycle -- each block-commit has a unique parent + assert_eq!(parent_commits.len(), all_ops.len()); + + for op_list in all_ops.iter() { + for opdata in op_list.iter() { + let mut found_parent = false; + for parent_commit in parent_commits.iter() { + if parent_commit.block_height == (opdata.parent_block_ptr as u64) + && parent_commit.vtxindex == (opdata.parent_vtxindex as u32) + { + found_parent = true; + break; + } + } + assert!(found_parent, "did not find parent for {:?}", opdata); + } + } + + let mut all_ops_with_orphan = all_ops.clone(); + all_ops_with_orphan[1][0].parent_vtxindex += 1; + + let parent_commits = read_parent_block_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_with_orphan, + ) + .unwrap(); + + // this is a simple reward cycle -- each block-commit has a unique parent, except for the + // orphan + assert_eq!(parent_commits.len(), all_ops_with_orphan.len() - 1); + + let mut all_ops_with_same_parent = all_ops.clone(); + for ops in all_ops_with_same_parent.iter_mut() { + for opdata in ops.iter_mut() { + opdata.parent_block_ptr = 3; + opdata.parent_vtxindex = 0; + } + } + + let parent_commits = read_parent_block_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_with_same_parent, + ) + .unwrap(); + + assert_eq!(parent_commits.len(), 1); + assert_eq!(parent_commits[0].block_height, 3); + assert_eq!(parent_commits[0].vtxindex, 0); + } + + #[test] + fn test_filter_orphan_block_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = 
BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) + .unwrap(); + + let mut all_ops_with_orphan = all_ops.clone(); + all_ops_with_orphan[1][0].parent_vtxindex += 1; + + assert_eq!(all_ops_with_orphan[0].len(), 1); + assert_eq!(all_ops_with_orphan[1].len(), 1); + assert_eq!(all_ops_with_orphan[2].len(), 1); + + let parent_commits = read_parent_block_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_with_orphan, + ) + .unwrap(); + let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops_with_orphan); + + assert_eq!(filtered_ops.len(), all_ops.len()); + assert_eq!(filtered_ops[0].len(), 1); + assert_eq!(filtered_ops[1].len(), 0); + assert_eq!(filtered_ops[2].len(), 1); + } + + #[test] + fn test_filter_missed_block_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = 
burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) + .unwrap(); + + let mut all_ops_with_missed = all_ops.clone(); + all_ops_with_missed[1][0].burn_parent_modulus -= 1; + + assert_eq!(all_ops_with_missed[0].len(), 1); + assert_eq!(all_ops_with_missed[1].len(), 1); + assert_eq!(all_ops_with_missed[2].len(), 1); + + let parent_commits = read_parent_block_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_with_missed, + ) + .unwrap(); + let filtered_ops = filter_missed_block_commits(all_ops_with_missed); + + assert_eq!(filtered_ops.len(), all_ops.len()); + assert_eq!(filtered_ops[0].len(), 1); + assert_eq!(filtered_ops[1].len(), 0); + assert_eq!(filtered_ops[2].len(), 1); + } + + #[test] + fn test_find_heaviest_block_commit() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; 
+ let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) + .unwrap(); + let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops); + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &filtered_ops, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); + + // since this is just a linear chain of block-commits, the heaviest parent is the parent of the + // first block-commit in the prepare phase + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); + + // make a forked history, but with a best-tip + // 1,0 <-- 2,0 <-- 3,0 <-- 4,0 + // \ + // `---------------------------- 5,0 + let mut all_ops_forked_majority = filtered_ops.clone(); + all_ops_forked_majority[2][0].parent_block_ptr = 1; + all_ops_forked_majority[2][0].parent_vtxindex = 0; + + // still commit 1 + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_forked_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); + + 
assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![false]]); + + // make a forked history, with another best-tip winner, but with a deeper fork split + // 1,0 <-- 2,0 <-- 3,0 + // \ + // `------- 4,0 <-- 5,0 + let mut all_ops_forked_majority = filtered_ops.clone(); + all_ops_forked_majority[1][0].parent_block_ptr = 2; + all_ops_forked_majority[1][0].parent_vtxindex = 0; + + all_ops_forked_majority[2][0].parent_block_ptr = 2; + all_ops_forked_majority[2][0].parent_vtxindex = 0; + + // still commit 1 + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_forked_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); + + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); + + // make a forked history where there is no best tip, but enough confirmations + // 1,0 <-- 2,0 <-- 3,0 + // |\ + // | `------- 4,0 + // \ + // `------------- 5,0 + let mut all_ops_no_majority = filtered_ops.clone(); + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 0; + + all_ops_no_majority[1][0].parent_block_ptr = 2; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 1; + + all_ops_no_majority[2][0].parent_block_ptr = 2; + all_ops_no_majority[2][0].parent_vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 2; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let 
(heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); + + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); + + // make a forked history where there is no best tip, but enough (majority) confirmations + // 1,0 <-- 2,0 <-- 3,0 + // | \ + // | `-------- 4,0 + // | + // `----------------------- 5,0 + let mut all_ops_no_majority = filtered_ops.clone(); + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 0; + + all_ops_no_majority[1][0].parent_block_ptr = 2; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 1; + + all_ops_no_majority[2][0].parent_block_ptr = 1; + all_ops_no_majority[2][0].parent_vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 20; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); + + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![false]]); + + // make a history where there is no anchor block, period + // 1,0 <-- 2,0 X-- 3,0 + // + // X------- 4,0 + // + // X------------ 5,0 + let mut all_ops_no_majority = filtered_ops.clone(); + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 10; + all_ops_no_majority[0][0].burn_fee = 0; + + all_ops_no_majority[1][0].parent_block_ptr = 2; + all_ops_no_majority[1][0].parent_vtxindex = 10; + all_ops_no_majority[1][0].burn_fee = 1; + + all_ops_no_majority[2][0].parent_block_ptr = 1; + all_ops_no_majority[2][0].parent_vtxindex = 10; + all_ops_no_majority[2][0].burn_fee 
= 20; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_none()); + } + + #[test] + fn test_find_heaviest_parent_commit_many_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + let (next_headers, commits) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None, None], + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) + .unwrap(); + let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops); + + // make a history with two miners' commits. 
+ // sortition winners in prepare phase were 3,0; 4,1; 5,0 + // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 ,--- 5,0 + // \ \ / + // `---- 3,1 `--- 4,1 <--- 5,1 + let mut all_ops_no_majority = filtered_ops.clone(); + + // 3,0 + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 1; + + // 3,1 + all_ops_no_majority[0][1].parent_block_ptr = 2; + all_ops_no_majority[0][1].parent_vtxindex = 0; + all_ops_no_majority[0][1].vtxindex = 1; + all_ops_no_majority[0][1].burn_fee = 1; + + // 4,0 + all_ops_no_majority[1][0].parent_block_ptr = 3; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 2; + + // 4,1 + all_ops_no_majority[1][1].parent_block_ptr = 3; + all_ops_no_majority[1][1].parent_vtxindex = 0; + all_ops_no_majority[1][1].vtxindex = 1; + all_ops_no_majority[1][1].burn_fee = 2; + + // 5,0 + all_ops_no_majority[2][0].parent_block_ptr = 4; + all_ops_no_majority[2][0].parent_vtxindex = 1; + all_ops_no_majority[2][0].vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 3; + + // 5,1 + all_ops_no_majority[2][1].parent_block_ptr = 4; + all_ops_no_majority[2][1].parent_vtxindex = 1; + all_ops_no_majority[2][1].vtxindex = 1; + all_ops_no_majority[2][1].burn_fee = 3; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); + + assert_eq!( + commits[1][0].as_ref().unwrap(), + &heaviest_parent_block_commit + ); + assert_eq!( + descendancy, + vec![vec![true, true], vec![true, true], vec![true, true]] + ); + + // make a history with two miners' commits, with some invalid commits. 
+ // The heavier commit descendancy wins -- 2,0 is the anchor block. + // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 <--- 5,0 (winner) + // \ + // `---- 2,1 <--- 3,1 <--- 4,1 <--- 5,1 + let mut all_ops_no_majority = filtered_ops.clone(); + + // 3,0 + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 1; + + // 3,1 + all_ops_no_majority[0][1].parent_block_ptr = 2; + all_ops_no_majority[0][1].parent_vtxindex = 1; + all_ops_no_majority[0][1].vtxindex = 1; + all_ops_no_majority[0][1].burn_fee = 1; + + // 4,0 + all_ops_no_majority[1][0].parent_block_ptr = 3; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 2; + + // 4,1 + all_ops_no_majority[1][1].parent_block_ptr = 3; + all_ops_no_majority[1][1].parent_vtxindex = 1; + all_ops_no_majority[1][1].vtxindex = 1; + all_ops_no_majority[1][1].burn_fee = 2; + + // 5,0 + all_ops_no_majority[2][0].parent_block_ptr = 4; + all_ops_no_majority[2][0].parent_vtxindex = 0; + all_ops_no_majority[2][0].vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 4; + + // 5,1 + all_ops_no_majority[2][1].parent_block_ptr = 4; + all_ops_no_majority[2][1].parent_vtxindex = 1; + all_ops_no_majority[2][1].vtxindex = 1; + all_ops_no_majority[2][1].burn_fee = 3; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); + + // best option wins + assert_eq!( + commits[1][0].as_ref().unwrap(), + &heaviest_parent_block_commit + ); + assert_eq!( + descendancy, + vec![vec![true, false], vec![true, false], vec![true, false]] + ); + + // make a history with two miners' commits, with some invalid 
commits. + // commit descendancy weight is a tie, so highest commit is the anchor block (2,1) + // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 <--- 5,0 + // \ + // `---- 2,1 <--- 3,1 <--- 4,1 <--- 5,1 (winner) + let mut all_ops_no_majority = filtered_ops.clone(); + + // 3,0 + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 1; + + // 3,1 + all_ops_no_majority[0][1].parent_block_ptr = 2; + all_ops_no_majority[0][1].parent_vtxindex = 1; + all_ops_no_majority[0][1].vtxindex = 1; + all_ops_no_majority[0][1].burn_fee = 1; + + // 4,0 + all_ops_no_majority[1][0].parent_block_ptr = 3; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 2; + + // 4,1 + all_ops_no_majority[1][1].parent_block_ptr = 3; + all_ops_no_majority[1][1].parent_vtxindex = 1; + all_ops_no_majority[1][1].vtxindex = 1; + all_ops_no_majority[1][1].burn_fee = 2; + + // 5,0 + all_ops_no_majority[2][0].parent_block_ptr = 4; + all_ops_no_majority[2][0].parent_vtxindex = 0; + all_ops_no_majority[2][0].vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 3; + + // 5,1 + all_ops_no_majority[2][1].parent_block_ptr = 4; + all_ops_no_majority[2][1].parent_vtxindex = 1; + all_ops_no_majority[2][1].vtxindex = 1; + all_ops_no_majority[2][1].burn_fee = 3; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); + + // best option wins + assert_eq!( + commits[1][1].as_ref().unwrap(), + &heaviest_parent_block_commit + ); + assert_eq!( + descendancy, + vec![vec![false, true], vec![false, true], vec![false, true]] + ); + } + + #[test] + fn 
test_update_pox_affirmation_maps_3_forks() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=0: before update: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("n").unwrap()); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + 
.is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=0: after update: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + let anchor_block_0 = BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .unwrap() + .0; + eprintln!("anchor block 1 at height {}", anchor_block_0.block_height); + assert!(anchor_block_0.block_height < commits_0[7][0].as_ref().unwrap().block_height); + + // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block + let (next_headers, commits_1) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[7][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's two anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); + assert_eq!(canonical_am, 
AffirmationMap::decode("pp").unwrap()); + + // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block but not rc + // 1's + assert!(anchor_block_0.block_height < commits_0[6][0].as_ref().unwrap().block_height); + let (next_headers, commits_2) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[6][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there's three anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + + // there are two equivalently heavy affirmation maps, but the affirmation map discovered later + // is the heaviest. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=2: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pa").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pap").unwrap()); + + // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block, but not rc + // 1's or rc 2's + assert!(anchor_block_0.block_height < commits_0[8][0].as_ref().unwrap().block_height); + let (next_headers, commits_3) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[8][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + // there are three equivalently heavy affirmation maps, but the affirmation map discovered last + // is the heaviest. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=3: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("paa").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("paap").unwrap()); + } + + #[test] + fn test_update_pox_affirmation_maps_unique_anchor_block() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=0: before update: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("n").unwrap()); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=0: after update: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + let anchor_block_0 = BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .unwrap() + .0; + eprintln!("anchor block 1 at height {}", anchor_block_0.block_height); + assert!(anchor_block_0.block_height < commits_0[7][0].as_ref().unwrap().block_height); + + // try and select the same anchor block, twice + let mut dup_commits = commits_0.clone(); + for (i, cmts) in 
dup_commits.iter_mut().enumerate() { + let block_header = BurnchainBlockHeader { + block_height: (i + commits_0.len() + 1) as u64, + block_hash: next_burn_header_hash(), + parent_block_hash: headers + .last() + .map(|blk| blk.block_hash.clone()) + .unwrap_or(first_bhh.clone()), + num_txs: cmts.len() as u64, + timestamp: (i + commits_0.len()) as u64, + }; + + for cmt_opt in cmts.iter_mut() { + if let Some(cmt) = cmt_opt.as_mut() { + cmt.block_height = block_header.block_height; + cmt.parent_block_ptr = anchor_block_0.block_height as u32; + cmt.parent_vtxindex = anchor_block_0.vtxindex as u16; + cmt.burn_parent_modulus = + ((cmt.block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8; + cmt.burn_header_hash = block_header.block_hash.clone(); + cmt.block_header_hash = next_block_hash(); + } + } + + headers.push(block_header.clone()); + + let cmt_ops: Vec = cmts + .iter() + .filter_map(|op| op.clone()) + .map(|op| BlockstackOperationType::LeaderBlockCommit(op)) + .collect(); + + burnchain_db + .store_new_burnchain_block_ops_unchecked( + &burnchain, + &headers, + &block_header, + &cmt_ops, + ) + .unwrap(); + } + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's still only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_none()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, 
AffirmationMap::decode("pn").unwrap()); + } + + #[test] + fn test_update_pox_affirmation_maps_absent() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // make two histories -- one with an anchor block, and one without. + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None, None], + ); + + // no anchor blocks recorded, yet! 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + assert_eq!(heaviest_am, AffirmationMap::empty()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block, and it's at vtxindex 1 (not 0) + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert_eq!( + BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .unwrap() + .0 + .vtxindex, + 1 + ); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_none()); + + // the anchor block itself affirms nothing + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=0: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + for i in 5..10 { + let block_commit = BurnchainDB::get_block_commit( + burnchain_db.conn(), + &commits_0[i][0].as_ref().unwrap().txid, + ) + .unwrap() + .unwrap(); + assert_eq!(block_commit.vtxindex, 0); + + let block_commit_metadata = BurnchainDB::get_commit_metadata( + burnchain_db.conn(), + &block_commit.burn_header_hash, + &block_commit.txid, + ) + .unwrap() + .unwrap(); + assert_eq!(block_commit_metadata.anchor_block_descendant, None); + } + + // build a second reward cycle off of a commit that does _not_ affirm the first anchor + // block + let (next_headers, commits_1) = make_reward_cycle( + &mut 
burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[9][1].clone(), commits_0[9][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // the second anchor block affirms that the first anchor block is missing. + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("ap").unwrap()); + + // build a third reward cycle off of a commit in the second reward cycle, but make it so + // that there is no anchor block mined + let (next_headers, commits_2) = make_reward_cycle_without_anchor( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_1[9][0].clone(), commits_1[9][1].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there isn't a third anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + + // heaviest _anchor block_ affirmation map is unchanged. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=2: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("apn").unwrap()); + + // build a fourth reward cycle off of a commit in the third reward cycle, but make it so + // that there is no anchor block mined + assert!(commits_2[5][0].is_some()); + assert!(commits_2[5][1].is_some()); + let (next_headers, commits_3) = make_reward_cycle_without_anchor( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_2[5][0].clone(), commits_2[5][1].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_none()); + + // heaviest _anchor block_ affirmation map is unchanged. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=3: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("apnn").unwrap()); + + // make a fourth fifth cycle, again with a missing anchor block + assert!(commits_3[5][0].is_some()); + assert!(commits_3[5][1].is_some()); + let (next_headers, commits_4) = make_reward_cycle_without_anchor( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_3[5][0].clone(), commits_3[5][1].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 4, &burnchain).unwrap(); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_none()); + + // heaviest _anchor block_ affirmation map advances + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=4: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("apnnn").unwrap()); + + // make a fifth reward cycle, but with an anchor block. 
Affirms the first anchor block by + // descending from a chain that descends from it. + assert!(commits_4[5][0].is_some()); + assert!(commits_4[5][1].is_some()); + let (next_headers, commits_5) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_4[5][1].clone(), commits_4[5][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 5, &burnchain).unwrap(); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 5) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 6) + .unwrap() + .is_some()); + + // heaviest _anchor block_ affirmation map advances, since the new anchor block affirms the + // last 4 reward cycles, including the anchor block mined in the first reward cycle + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=5: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + // anchor block was chosen in the last reward cycle, and in doing so created the heaviest + // affirmation map for an anchor block, so the canonical affirmation map is + // whatever that last anchor block affirmed + assert_eq!(heaviest_am, AffirmationMap::decode("pannn").unwrap()); + assert_eq!(canonical_am, 
AffirmationMap::decode("pannnp").unwrap()); + + // make a third history that affirms _nothing_. It should eventually overtake this last + // heaviest affirmation map + let mut start = vec![commits_0[3][1].clone()]; + for i in 0..6 { + let (next_headers, commits) = make_reward_cycle_with_vote( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + start, + false, + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 6 + i, &burnchain).unwrap(); + start = vec![commits[5][0].clone()]; + + let heaviest_am = BurnchainDB::get_heaviest_anchor_block_affirmation_map( + burnchain_db.conn(), + &burnchain, + ) + .unwrap(); + let canonical_am = BurnchainDB::get_canonical_affirmation_map( + burnchain_db.conn(), + &burnchain, + |_, _| true, + ) + .unwrap(); + eprintln!( + "rc={}: heaviest = {}, canonical = {}", + 6 + i, + &heaviest_am, + &canonical_am + ); + } + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=11: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pannn").unwrap()); + assert_eq!( + canonical_am, + AffirmationMap::decode("pannnpnnnnnn").unwrap() + ); + + // other affirmation map should be present + let unaffirmed_am = AffirmationMap::decode("aannnannnnnn").unwrap(); + let am_id = BurnchainDB::get_affirmation_map_id(burnchain_db.conn(), &unaffirmed_am) + .unwrap() + .unwrap(); + let weight = BurnchainDB::get_affirmation_weight(burnchain_db.conn(), am_id) + .unwrap() + .unwrap(); + assert_eq!(weight, 9); + } + + #[test] + fn test_update_pox_affirmation_maps_nothing() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = 
PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + assert_eq!(heaviest_am, AffirmationMap::empty()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=0: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, 
AffirmationMap::decode("p").unwrap()); + + // build a second reward cycle off of the first, but with no anchor block + let (next_headers, commits_1) = make_reward_cycle_with_vote( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[9][0].clone()], + false, + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's still one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_none()); + + // second reward cycle doesn't have an anchor block, so there's no heaviest anchor block + // affirmation map yet + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pn").unwrap()); + + // build a 3rd reward cycle, but it affirms an anchor block + let last_commit_1 = { + let mut last_commit = None; + for i in 0..commits_1.len() { + if commits_1[i][0].is_some() { + last_commit = commits_1[i][0].clone(); + } + } + last_commit + }; + + let (next_headers, commits_2) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![last_commit_1], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there's two anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + 
.unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_none()); + + // there's no anchor block in rc 1 + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=2: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pnp").unwrap()); + + // build a fourth reward cycle, with no vote + let (next_headers, commits_3) = make_reward_cycle_with_vote( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_2[9][0].clone()], + false, + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + // there are three equivalently heavy affirmation maps, but the affirmation map discovered last + // is the heaviest. BUT THIS TIME, MAKE THE UNCONFIRMED ORACLE DENY THAT THIS LAST + // ANCHORED BLOCK EXISTS. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + false + }) + .unwrap(); + eprintln!( + "rc=3 (deny): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pnan").unwrap()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=3 (exist): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pnpn").unwrap()); + } + + #[test] + fn test_update_pox_affirmation_fork_2_cycles() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + assert_eq!(heaviest_am, AffirmationMap::empty()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=0 (true): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + false + }) + .unwrap(); + eprintln!( + "rc=0 (false): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(canonical_am, AffirmationMap::decode("a").unwrap()); + + // build a second reward cycle off of the first + let (next_headers, commits_1) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[4][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's two anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + 
assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + + // the network affirms two anchor blocks, but the second anchor block only affirms the + // first anchor block. + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=1 (true): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pp").unwrap()); + + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + false + }) + .unwrap(); + eprintln!( + "rc=1 (false): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(canonical_am, AffirmationMap::decode("pa").unwrap()); + + // build a third reward cycle off of the first, before the 2nd's anchor block + let (next_headers, commits_2) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[1][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there's four anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + 
BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=2 (true): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("ppp").unwrap()); + + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + false + }) + .unwrap(); + eprintln!( + "rc=2 (false): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(canonical_am, AffirmationMap::decode("paa").unwrap()); + + // build a fourth reward cycle off of the third + let (next_headers, commits_3) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_2[4][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + // there's four anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=3: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("aap").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("aapp").unwrap()); + } + + #[test] + fn 
test_update_pox_affirmation_fork_duel() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + assert_eq!(heaviest_am, AffirmationMap::empty()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + 
true + }) + .unwrap(); + eprintln!( + "rc=0: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + // build a second reward cycle off of the first, but at the start + assert!(commits_0[1][0].is_some()); + let (next_headers, commits_1) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[1][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's two anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + + // the network affirms two anchor blocks, but the second one wins + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("ap").unwrap()); + + // build a third reward cycle off of the first + assert!(commits_0[4][0].clone().unwrap().block_height == 5); + let (next_headers, commits_2) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[4][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there's four anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + 
assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=2: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pa").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pap").unwrap()); + + // build a fourth reward cycle off of the second + assert!(commits_1[4][0].clone().unwrap().block_height == 10); + let (next_headers, commits_3) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_1[4][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + // there's four anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc=3: heaviest = {}, canonical = {}", + &heaviest_am, 
&canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("apa").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("apap").unwrap()); + } +} From 3be408c55dcaaec6c196218600a38fc6d7890fe5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:40:05 -0400 Subject: [PATCH 008/116] feat: expand the metadata we track for block-commit transactions to include all the data needed to calculate each block-commit's affirmation map. Also, add the routines necessary to deduce the affirmation map for a given reward cycle R, if the affirmation maps for all prior reward cycles up to R are already known. --- src/burnchains/db.rs | 1507 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 1441 insertions(+), 66 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index f4e45c2358..128612f0c2 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -14,31 +14,40 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::HashMap; -use std::{fs, io}; +use std::fmt; + +use std::collections::{HashMap, HashSet}; +use std::{cmp, fs, io}; use rusqlite::{ types::ToSql, Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS, }; use serde_json; +use burnchains::affirmation::*; use burnchains::Txid; -use burnchains::{Burnchain, BurnchainBlock, BurnchainBlockHeader, Error as BurnchainError}; -use chainstate::burn::operations::BlockstackOperationType; +use burnchains::{ + Burnchain, BurnchainBlock, BurnchainBlockHeader, BurnchainSigner, Error as BurnchainError, + PoxConstants, +}; +use chainstate::burn::operations::{ + leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS, BlockstackOperationType, LeaderBlockCommitOp, +}; +use chainstate::burn::BlockSnapshot; use chainstate::stacks::index::MarfTrieId; use util::db::{ - query_row, query_rows, sql_pragma, tx_begin_immediate, tx_busy_handler, u64_to_sql, - Error as DBError, FromColumn, FromRow, + query_row, query_row_panic, query_rows, sql_pragma, tx_begin_immediate, tx_busy_handler, + u64_to_sql, DBConn, Error as DBError, FromColumn, FromRow, }; -use crate::types::chainstate::BurnchainHeaderHash; +use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash}; use crate::types::proof::ClarityMarfTrieId; pub struct BurnchainDB { conn: Connection, } -struct BurnchainDBTransaction<'a> { +pub struct BurnchainDBTransaction<'a> { sql_tx: Transaction<'a>, } @@ -47,6 +56,79 @@ pub struct BurnchainBlockData { pub ops: Vec, } +/// A trait for reading burnchain block headers +pub trait BurnchainHeaderReader { + fn read_burnchain_headers( + &self, + start_height: u64, + end_height: u64, + ) -> Result, DBError>; + fn get_burnchain_headers_height(&self) -> Result; +} + +const NO_ANCHOR_BLOCK: u64 = i64::MAX as u64; + +#[derive(Debug, Clone)] +pub struct BlockCommitMetadata { + pub burn_block_hash: BurnchainHeaderHash, + pub txid: Txid, + pub block_height: u64, + pub vtxindex: u32, + pub affirmation_id: u64, + /// if 
Some(..), then this block-commit is the anchor block for a reward cycle, and the + /// reward cycle is represented as the inner u64. + pub anchor_block: Option, + /// If Some(..), then this is the anchor block that this block-commit descends from + pub anchor_block_descendant: Option, +} + +impl FromColumn for AffirmationMap { + fn from_column<'a>(row: &'a Row, col_name: &str) -> Result { + let txt: String = row.get_unwrap(col_name); + let am = AffirmationMap::decode(&txt).ok_or(DBError::ParseError)?; + Ok(am) + } +} + +impl FromRow for AffirmationMap { + fn from_row<'a>(row: &'a Row) -> Result { + AffirmationMap::from_column(row, "affirmation_map") + } +} + +impl FromRow for BlockCommitMetadata { + fn from_row<'a>(row: &'a Row) -> Result { + let burn_block_hash = BurnchainHeaderHash::from_column(row, "burn_block_hash")?; + let txid = Txid::from_column(row, "txid")?; + let block_height = u64::from_column(row, "block_height")?; + let vtxindex: u32 = row.get_unwrap("vtxindex"); + let affirmation_id = u64::from_column(row, "affirmation_id")?; + let anchor_block_u64 = u64::from_column(row, "anchor_block")?; + let anchor_block = if anchor_block_u64 != NO_ANCHOR_BLOCK { + Some(anchor_block_u64) + } else { + None + }; + + let anchor_block_descendant_u64 = u64::from_column(row, "anchor_block_descendant")?; + let anchor_block_descendant = if anchor_block_descendant_u64 != NO_ANCHOR_BLOCK { + Some(anchor_block_descendant_u64) + } else { + None + }; + + Ok(BlockCommitMetadata { + burn_block_hash, + txid, + block_height, + vtxindex, + affirmation_id, + anchor_block: anchor_block, + anchor_block_descendant, + }) + } +} + /// Apply safety checks on extracted blockstack transactions /// - put them in order by vtxindex /// - make sure there are no vtxindex duplicates @@ -102,7 +184,7 @@ impl FromRow for BurnchainBlockHeader { impl FromRow for BlockstackOperationType { fn from_row(row: &Row) -> Result { - let serialized = row.get_unwrap::<_, String>("op"); + let serialized: String 
= row.get_unwrap("op"); let deserialized = serde_json::from_str(&serialized) .expect("CORRUPTION: db store un-deserializable block op"); @@ -110,9 +192,7 @@ impl FromRow for BlockstackOperationType { } } -pub const BURNCHAIN_DB_VERSION: &'static str = "1"; - -const BURNCHAIN_DB_INITIAL_SCHEMA: &'static str = " +const BURNCHAIN_DB_SCHEMA: &'static str = r#" CREATE TABLE burnchain_db_block_headers ( block_height INTEGER NOT NULL, block_hash TEXT UNIQUE NOT NULL, @@ -127,10 +207,45 @@ CREATE TABLE burnchain_db_block_ops ( block_hash TEXT NOT NULL, op TEXT NOT NULL, txid TEXT NOT NULL, + FOREIGN KEY(block_hash) REFERENCES burnchain_db_block_headers(block_hash) ); -CREATE TABLE db_config(version TEXT NOT NULL);"; +CREATE TABLE affirmation_maps ( + affirmation_id INTEGER PRIMARY KEY AUTOINCREMENT, + weight INTEGER NOT NULL, + affirmation_map TEXT NOT NULL +); + +-- ensure anchor block uniqueness +CREATE TABLE anchor_blocks ( + reward_cycle INTEGER PRIMARY KEY -- will be i64::MAX if absent +); + +CREATE TABLE block_commit_metadata ( + burn_block_hash TEXT NOT NULL, + txid TEXT NOT NULL, + block_height INTEGER NOT NULL, + vtxindex INTEGER NOT NULL, + + affirmation_id INTEGER NOT NULL, + anchor_block INTEGER NOT NULL, + anchor_block_descendant INTEGER NOT NULL, + + PRIMARY KEY(burn_block_hash,txid), + FOREIGN KEY(affirmation_id) REFERENCES affirmation_maps(affirmation_id), + FOREIGN KEY(anchor_block) REFERENCES anchor_blocks(reward_cycle) +); + +-- override the canonical affirmation map at the operator's discression +CREATE TABLE overrides ( + reward_cycle INTEGER PRIMARY KEY NOT NULL, + affirmation_map TEXT NOT NULL +); + +INSERT INTO affirmation_maps(affirmation_id,weight,affirmation_map) VALUES (0,0,""); -- empty affirmation map +INSERT INTO anchor_blocks(reward_cycle) VALUES (9223372036854775807); -- non-existant reward cycle (i64::MAX) +"#; impl<'a> BurnchainDBTransaction<'a> { fn store_burnchain_db_entry( @@ -147,16 +262,586 @@ impl<'a> BurnchainDBTransaction<'a> { 
&u64_to_sql(header.num_txs)?, &u64_to_sql(header.timestamp)?, ]; - match self.sql_tx.execute(sql, args) { Ok(_) => Ok(self.sql_tx.last_insert_rowid()), Err(e) => Err(BurnchainError::from(e)), } } - fn store_blockstack_ops( + fn insert_block_commit_affirmation_map( + &self, + affirmation_map: &AffirmationMap, + ) -> Result { + let weight = affirmation_map.weight(); + let sql = "INSERT INTO affirmation_maps (affirmation_map,weight) VALUES (?1,?2)"; + let args: &[&dyn ToSql] = &[&affirmation_map.encode(), &u64_to_sql(weight)?]; + match self.sql_tx.execute(sql, args) { + Ok(_) => { + let am_id = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &affirmation_map)? + .expect("BUG: no affirmation ID for affirmation map we just inserted"); + Ok(am_id) + } + Err(e) => Err(DBError::SqliteError(e)), + } + } + + fn update_block_commit_affirmation( + &self, + block_commit: &LeaderBlockCommitOp, + anchor_block_descendant: Option, + affirmation_id: u64, + ) -> Result<(), DBError> { + let sql = "UPDATE block_commit_metadata SET affirmation_id = ?1, anchor_block_descendant = ?2 WHERE burn_block_hash = ?3 AND txid = ?4"; + let args: &[&dyn ToSql] = &[ + &u64_to_sql(affirmation_id)?, + &u64_to_sql(anchor_block_descendant.unwrap_or(NO_ANCHOR_BLOCK))?, + &block_commit.burn_header_hash, + &block_commit.txid, + ]; + match self.sql_tx.execute(sql, args) { + Ok(_) => { + test_debug!("Set affirmation map ID of {} - {},{},{} (parent {},{}) to {} (anchor block descendant? 
{:?})", + &block_commit.burn_header_hash, &block_commit.txid, block_commit.block_height, block_commit.vtxindex, block_commit.parent_block_ptr, block_commit.parent_vtxindex, affirmation_id, &anchor_block_descendant); + Ok(()) + } + Err(e) => Err(DBError::SqliteError(e)), + } + } + + pub fn set_anchor_block( + &self, + block_commit: &LeaderBlockCommitOp, + target_reward_cycle: u64, + ) -> Result<(), DBError> { + let sql = "INSERT OR REPLACE INTO anchor_blocks (reward_cycle) VALUES (?1)"; + let args: &[&dyn ToSql] = &[&u64_to_sql(target_reward_cycle)?]; + self.sql_tx + .execute(sql, args) + .map_err(|e| DBError::SqliteError(e))?; + + let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE burn_block_hash = ?2 AND txid = ?3"; + let args: &[&dyn ToSql] = &[ + &u64_to_sql(target_reward_cycle)?, + &block_commit.burn_header_hash, + &block_commit.txid, + ]; + match self.sql_tx.execute(sql, args) { + Ok(_) => { + test_debug!( + "Set anchor block for reward cycle {} to {},{},{},{}", + target_reward_cycle, + &block_commit.burn_header_hash, + &block_commit.txid, + &block_commit.block_height, + &block_commit.vtxindex + ); + Ok(()) + } + Err(e) => Err(DBError::SqliteError(e)), + } + } + + pub fn clear_anchor_block(&self, reward_cycle: u64) -> Result<(), DBError> { + let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE anchor_block = ?2"; + let args: &[&dyn ToSql] = &[&u64_to_sql(NO_ANCHOR_BLOCK)?, &u64_to_sql(reward_cycle)?]; + self.sql_tx + .execute(sql, args) + .map(|_| ()) + .map_err(|e| DBError::SqliteError(e)) + } + + /// Clear the descendancy data and affirmations for all block-commits in a reward cycle + /// (both the reward and prepare phases), as well as anchor block data. 
+ pub fn clear_reward_cycle_descendancies( + &self, + reward_cycle: u64, + burnchain: &Burnchain, + ) -> Result<(), DBError> { + let first_block_height = burnchain.reward_cycle_to_block_height(reward_cycle); + let last_block_height = burnchain.reward_cycle_to_block_height(reward_cycle + 1); + + test_debug!( + "Clear descendancy data for reward cycle {} (blocks {}-{})", + reward_cycle, + first_block_height, + last_block_height + ); + + let sql = "UPDATE block_commit_metadata SET affirmation_id = 0, anchor_block = ?1, anchor_block_descendant = ?2 WHERE block_height >= ?3 AND block_height < ?4"; + let args: &[&dyn ToSql] = &[ + &u64_to_sql(NO_ANCHOR_BLOCK)?, + &u64_to_sql(NO_ANCHOR_BLOCK)?, + &u64_to_sql(first_block_height)?, + &u64_to_sql(last_block_height)?, + ]; + self.sql_tx + .execute(sql, args) + .map(|_| ()) + .map_err(|e| DBError::SqliteError(e)) + } + + /// Calculate a burnchain block's block-commits' descendancy information + pub fn update_block_descendancy( &self, - block_hash: &BurnchainHeaderHash, + indexer: &B, + hdr: &BurnchainBlockHeader, + burnchain: &Burnchain, + ) -> Result<(), BurnchainError> { + // find all block-commits for this block + let commits: Vec = { + let block_ops_qry = "SELECT * FROM burnchain_db_block_ops WHERE block_hash = ?"; + let block_ops = query_rows(&self.sql_tx, block_ops_qry, &[&hdr.block_hash])?; + block_ops + .into_iter() + .filter_map(|op| { + if let BlockstackOperationType::LeaderBlockCommit(opdata) = op { + Some(opdata) + } else { + None + } + }) + .collect() + }; + if commits.len() == 0 { + test_debug!("No block-commits for block {}", hdr.block_height); + return Ok(()); + } + + // for each commit[i], find its parent commit + let mut parent_commits = vec![]; + for commit in commits.iter() { + let parent_commit_opt = if commit.parent_block_ptr != 0 || commit.parent_vtxindex != 0 { + // parent is not genesis + BurnchainDB::get_commit_at( + &self.sql_tx, + indexer, + commit.parent_block_ptr, + commit.parent_vtxindex, + )? 
+ } else { + // parent is genesis + test_debug!( + "Parent block-commit of {},{},{} is the genesis commit", + &commit.txid, + commit.block_height, + commit.vtxindex + ); + None + }; + + parent_commits.push(parent_commit_opt); + } + assert_eq!(parent_commits.len(), commits.len()); + + // for each parent block-commit and block-commit, calculate the block-commit's new + // affirmation map + for (parent_commit_opt, commit) in parent_commits.iter().zip(commits.iter()) { + if let Some(parent_commit) = parent_commit_opt.as_ref() { + if get_parent_child_reward_cycles(parent_commit, commit, burnchain).is_some() { + // we have enough info to calculate this commit's affirmation + self.make_reward_phase_affirmation_map(burnchain, commit, parent_commit)?; + } else { + // parent is invalid + test_debug!( + "No block-commit parent reward cycle found for {},{},{}", + &commit.txid, + commit.block_height, + commit.vtxindex + ); + self.update_block_commit_affirmation(commit, None, 0) + .map_err(|e| BurnchainError::from(e))?; + } + } else { + if commit.parent_block_ptr == 0 && commit.parent_vtxindex == 0 { + test_debug!( + "Block-commit parent of {},{},{} is genesis", + &commit.txid, + commit.block_height, + commit.vtxindex + ); + } else { + // this is an invalid commit -- no parent found + test_debug!( + "No block-commit parent found for {},{},{}", + &commit.txid, + commit.block_height, + commit.vtxindex + ); + } + self.update_block_commit_affirmation(commit, None, 0) + .map_err(|e| BurnchainError::from(e))?; + } + } + + Ok(()) + } + + /// Update the anchor block descendancy information for the _reward_ phase of a reward cycle. + /// That is, for each block-commit in this reward cycle, mark it as descending from this reward + /// cycle's anchor block (if it exists), or not. If there is no anchor block, then no block in + /// this reward cycle descends from an anchor block. Each reward-phase block-commit's affirmation + /// map is updated by this method.
+ /// Only call after the reward cycle's prepare phase's affirmation maps and descendancy information has been + /// updated. + pub fn update_reward_phase_descendancies( + &self, + indexer: &B, + reward_cycle: u64, + burnchain: &Burnchain, + ) -> Result<(), BurnchainError> { + let first_block_height = burnchain.reward_cycle_to_block_height(reward_cycle); + let last_block_height = burnchain.reward_cycle_to_block_height(reward_cycle + 1) + - (burnchain.pox_constants.prepare_length as u64); + let hdrs = indexer.read_burnchain_headers(first_block_height, last_block_height)?; + let reward_phase_end = + cmp::min(last_block_height, first_block_height + (hdrs.len() as u64)); + + test_debug!( + "Update reward-phase descendancies for reward cycle {} over {} headers between {}-{}", + reward_cycle, + hdrs.len(), + first_block_height, + reward_phase_end + ); + + for block_height in first_block_height..reward_phase_end { + let hdr = &hdrs[(block_height - first_block_height) as usize]; + self.update_block_descendancy(indexer, hdr, burnchain)?; + } + + test_debug!( + "Updated reward-phase descendancies for reward cycle {}", + reward_cycle + ); + Ok(()) + } + + pub fn make_prepare_phase_affirmation_map( + &self, + indexer: &B, + burnchain: &Burnchain, + reward_cycle: u64, + block_commit: &LeaderBlockCommitOp, + anchor_block: Option<&LeaderBlockCommitOp>, + descends_from_anchor_block: bool, + ) -> Result { + test_debug!( + "Make affirmation map for {},{},{} (parent {},{}) in reward cycle {}", + &block_commit.txid, + block_commit.block_height, + block_commit.vtxindex, + block_commit.parent_block_ptr, + block_commit.parent_vtxindex, + reward_cycle + ); + + let parent = match BurnchainDB::get_commit_at( + &self.sql_tx, + indexer, + block_commit.parent_block_ptr, + block_commit.parent_vtxindex, + )? 
{ + Some(p) => p, + None => { + if block_commit.parent_block_ptr == 0 && block_commit.vtxindex == 0 { + test_debug!( + "Prepare-phase commit {},{},{} builds off of genesis", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex + ); + } else { + test_debug!( + "Prepare-phase commit {},{},{} has no parent, so must be invalid", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex + ); + } + return Ok(0); + } + }; + + let parent_metadata = + BurnchainDB::get_commit_metadata(&self.sql_tx, &parent.burn_header_hash, &parent.txid)? + .expect("BUG: no metadata found for parent block-commit"); + + let (am, affirmed_reward_cycle) = if let Some(ab) = anchor_block { + let anchor_am_id = BurnchainDB::get_block_commit_affirmation_id(&self.sql_tx, &ab)? + .expect("BUG: anchor block has no affirmation map"); + + let mut am = BurnchainDB::get_affirmation_map(&self.sql_tx, anchor_am_id) + .map_err(|e| BurnchainError::from(e))? + .ok_or(BurnchainError::DBError(DBError::NotFoundError))?; + + if descends_from_anchor_block { + test_debug!("Prepare-phase commit {},{},{} descends from anchor block {},{},{} for reward cycle {}", &block_commit.block_header_hash, block_commit.block_height, block_commit.vtxindex, &ab.block_header_hash, ab.block_height, ab.vtxindex, reward_cycle); + am.push(AffirmationMapEntry::PoxAnchorBlockPresent); + (am, Some(reward_cycle)) + } else { + test_debug!("Prepare-phase commit {},{},{} does NOT descend from anchor block {},{},{} for reward cycle {}", &block_commit.block_header_hash, block_commit.block_height, block_commit.vtxindex, &ab.block_header_hash, ab.block_height, ab.vtxindex, reward_cycle); + am.push(AffirmationMapEntry::PoxAnchorBlockAbsent); + (am, parent_metadata.anchor_block_descendant) + } + } else { + let (parent_reward_cycle, _) = + get_parent_child_reward_cycles(&parent, block_commit, burnchain) + .ok_or(BurnchainError::DBError(DBError::NotFoundError))?; + + // load up the 
affirmation map for the last anchor block the parent affirmed + let (mut am, parent_rc_opt) = match parent_metadata.anchor_block_descendant { + Some(parent_ab_rc) => { + // parent affirmed some past anchor block + let (_, ab_metadata) = BurnchainDB::get_anchor_block_commit(&self.sql_tx, parent_ab_rc)? + .expect(&format!("BUG: parent descends from a reward cycle with an anchor block ({}), but no anchor block found", parent_ab_rc)); + + let mut am = + BurnchainDB::get_affirmation_map(&self.sql_tx, ab_metadata.affirmation_id)? + .expect("BUG: no affirmation map for parent commit's anchor block"); + + test_debug!("Prepare-phase commit {},{},{} does nothing for reward cycle {}, but it builds on its parent which affirms anchor block for reward cycle {} ({}) (affirms? {})", + &block_commit.block_header_hash, block_commit.block_height, block_commit.vtxindex, reward_cycle, parent_ab_rc, &am, (am.len() as u64) < parent_ab_rc); + + if (am.len() as u64) < parent_ab_rc { + // child is affirming the parent + am.push(AffirmationMapEntry::PoxAnchorBlockPresent); + } + + (am, Some(parent_ab_rc)) + } + None => { + let mut parent_am = BurnchainDB::get_affirmation_map( + &self.sql_tx, + parent_metadata.affirmation_id, + )? + .expect("BUG: no affirmation map for parent commit"); + + // parent affirms no anchor blocks + test_debug!("Prepare-phase commit {},{},{} does nothing for reward cycle {}, and it builds on a parent {},{} {} which affirms no anchor block (affirms? 
{})", + &block_commit.block_header_hash, block_commit.block_height, block_commit.vtxindex, reward_cycle, block_commit.parent_block_ptr, block_commit.parent_vtxindex, &parent_am, (parent_am.len() as u64) < parent_reward_cycle); + + if (parent_am.len() as u64) < parent_reward_cycle { + // child is affirming the parent + parent_am.push(AffirmationMapEntry::Nothing); + } + + (parent_am, None) + } + }; + + let num_affirmed = am.len() as u64; + for rc in (num_affirmed + 1)..(reward_cycle + 1) { + if BurnchainDB::has_anchor_block(&self.sql_tx, rc)? { + test_debug!( + "Commit {},{},{} skips reward cycle {} with anchor block", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex, + rc + ); + am.push(AffirmationMapEntry::PoxAnchorBlockAbsent); + } else { + // affirmation weight increases even if there's no decision made, because + // the lack of a decision is still an affirmation of all prior decisions + test_debug!( + "Commit {},{},{} skips reward cycle {} without anchor block", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex, + rc + ); + am.push(AffirmationMapEntry::Nothing); + } + } + + test_debug!( + "Prepare-phase commit {},{},{} affirms parent {},{} with {} descended from {:?}", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex, + parent.block_height, + parent.vtxindex, + &am, + &parent_metadata.anchor_block_descendant + ); + + (am, parent_rc_opt) + }; + + if let Some(am_id) = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &am) + .map_err(|e| BurnchainError::from(e))? + { + // child doesn't represent any new affirmations by the network, since its + // affirmation map already exists. + if cfg!(test) { + let _am_weight = BurnchainDB::get_affirmation_weight(&self.sql_tx, am_id)? 
+ .expect(&format!("BUG: no affirmation map {}", &am_id)); + + test_debug!("Affirmation map of prepare-phase block-commit {},{},{} (parent {},{}) is old: {:?} weight {} affirmed {:?}", + &block_commit.txid, block_commit.block_height, block_commit.vtxindex, block_commit.parent_block_ptr, block_commit.parent_vtxindex, &am, _am_weight, &affirmed_reward_cycle); + } + + self.update_block_commit_affirmation(block_commit, affirmed_reward_cycle, am_id) + .map_err(|e| BurnchainError::from(e))?; + Ok(am_id) + } else { + test_debug!("Affirmation map of prepare-phase block-commit {},{},{} (parent {},{}) is new: {:?} weight {} affirmed {:?}", + &block_commit.txid, block_commit.block_height, block_commit.vtxindex, block_commit.parent_block_ptr, block_commit.parent_vtxindex, &am, am.weight(), &affirmed_reward_cycle); + + let am_id = self + .insert_block_commit_affirmation_map(&am) + .map_err(|e| BurnchainError::from(e))?; + self.update_block_commit_affirmation(block_commit, affirmed_reward_cycle, am_id) + .map_err(|e| BurnchainError::from(e))?; + Ok(am_id) + } + } + + fn make_reward_phase_affirmation_map( + &self, + burnchain: &Burnchain, + block_commit: &LeaderBlockCommitOp, + parent: &LeaderBlockCommitOp, + ) -> Result { + assert_eq!(block_commit.parent_block_ptr as u64, parent.block_height); + assert_eq!(block_commit.parent_vtxindex as u32, parent.vtxindex); + + let parent_metadata = + BurnchainDB::get_commit_metadata(&self.sql_tx, &parent.burn_header_hash, &parent.txid)? 
+ .expect("BUG: no metadata found for existing block commit"); + + test_debug!( + "Reward-phase commit {},{},{} has parent {},{}, anchor block {:?}", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex, + parent.block_height, + parent.vtxindex, + &parent_metadata.anchor_block_descendant + ); + + let child_reward_cycle = burnchain + .block_height_to_reward_cycle(block_commit.block_height) + .expect("BUG: block commit exists before first block height"); + + let (am, affirmed_anchor_block_reward_cycle) = + if let Some(parent_ab_rc) = parent_metadata.anchor_block_descendant { + let am_id = parent_metadata.affirmation_id; + let mut am = BurnchainDB::get_affirmation_map(&self.sql_tx, am_id)? + .expect("BUG: no affirmation map for parent commit"); + + test_debug!("Affirmation map of parent is {}", &am); + + let start_rc = am.len() as u64; + for rc in (start_rc + 1)..(child_reward_cycle + 1) { + if BurnchainDB::has_anchor_block(&self.sql_tx, rc)? { + test_debug!( + "Commit {},{},{} skips reward cycle {} with anchor block", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex, + rc + ); + am.push(AffirmationMapEntry::PoxAnchorBlockAbsent); + } else { + test_debug!( + "Commit {},{},{} skips reward cycle {} without anchor block", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex, + rc + ); + am.push(AffirmationMapEntry::Nothing); + } + } + + (am, Some(parent_ab_rc)) + } else { + let mut am = AffirmationMap::empty(); + for rc in 1..(child_reward_cycle + 1) { + if BurnchainDB::has_anchor_block(&self.sql_tx, rc)? 
{ + test_debug!( + "Commit {},{},{} skips reward cycle {} with anchor block", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex, + rc + ); + am.push(AffirmationMapEntry::PoxAnchorBlockAbsent); + } else { + test_debug!( + "Commit {},{},{} skips reward cycle {} without anchor block", + &block_commit.block_header_hash, + block_commit.block_height, + block_commit.vtxindex, + rc + ); + am.push(AffirmationMapEntry::Nothing); + } + } + (am, None) + }; + + if let Some(am_id) = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &am) + .map_err(|e| BurnchainError::from(e))? + { + // child doesn't represent any new affirmations by the network, since its + // affirmation map already exists. + if cfg!(test) { + let _am_weight = BurnchainDB::get_affirmation_weight(&self.sql_tx, am_id)? + .expect(&format!("BUG: no affirmation map {}", &am_id)); + + test_debug!("Affirmation map of reward-phase block-commit {},{},{} (parent {},{}) is old: {:?} weight {}", + &block_commit.txid, block_commit.block_height, block_commit.vtxindex, block_commit.parent_block_ptr, block_commit.parent_vtxindex, &am, _am_weight); + } + + self.update_block_commit_affirmation( + block_commit, + affirmed_anchor_block_reward_cycle, + am_id, + ) + .map_err(|e| BurnchainError::from(e))?; + Ok(am_id) + } else { + test_debug!("Affirmation map of reward-phase block-commit {},{},{} (parent {},{}) is new: {:?} weight {}", + &block_commit.txid, block_commit.block_height, block_commit.vtxindex, block_commit.parent_block_ptr, block_commit.parent_vtxindex, &am, am.weight()); + + let am_id = self + .insert_block_commit_affirmation_map(&am) + .map_err(|e| BurnchainError::from(e))?; + self.update_block_commit_affirmation( + block_commit, + affirmed_anchor_block_reward_cycle, + am_id, + ) + .map_err(|e| BurnchainError::from(e))?; + Ok(am_id) + } + } + + fn insert_block_commit_metadata(&self, bcm: BlockCommitMetadata) -> Result<(), BurnchainError> { + let commit_metadata_sql = "INSERT OR 
REPLACE INTO block_commit_metadata + (burn_block_hash, txid, block_height, vtxindex, anchor_block, anchor_block_descendant, affirmation_id) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"; + let mut stmt = self.sql_tx.prepare(commit_metadata_sql)?; + let args: &[&dyn ToSql] = &[ + &bcm.burn_block_hash, + &bcm.txid, + &u64_to_sql(bcm.block_height)?, + &bcm.vtxindex, + &u64_to_sql(bcm.anchor_block.unwrap_or(NO_ANCHOR_BLOCK))?, + &u64_to_sql(bcm.anchor_block_descendant.unwrap_or(NO_ANCHOR_BLOCK))?, + &u64_to_sql(bcm.affirmation_id)?, + ]; + stmt.execute(args)?; + Ok(()) + } + + fn store_blockstack_ops( + &self, + burnchain: &Burnchain, + indexer: &B, + block_header: &BurnchainBlockHeader, block_ops: &[BlockstackOperationType], ) -> Result<(), BurnchainError> { let sql = "INSERT INTO burnchain_db_block_ops @@ -165,23 +850,74 @@ impl<'a> BurnchainDBTransaction<'a> { for op in block_ops.iter() { let serialized_op = serde_json::to_string(op).expect("Failed to serialize parsed BlockstackOp"); - let args: &[&dyn ToSql] = &[block_hash, op.txid_ref(), &serialized_op]; + let args: &[&dyn ToSql] = &[&block_header.block_hash, op.txid_ref(), &serialized_op]; stmt.execute(args)?; } + + for op in block_ops.iter() { + if let BlockstackOperationType::LeaderBlockCommit(ref opdata) = op { + let bcm = BlockCommitMetadata { + burn_block_hash: block_header.block_hash.clone(), + txid: opdata.txid.clone(), + block_height: opdata.block_height, + vtxindex: opdata.vtxindex, + // NOTE: these fields are filled in by the subsequent call. 
+ affirmation_id: 0, + anchor_block: None, + anchor_block_descendant: None, + }; + self.insert_block_commit_metadata(bcm)?; + } + } + + self.update_block_descendancy(indexer, block_header, burnchain)?; Ok(()) } - fn commit(self) -> Result<(), BurnchainError> { + pub fn commit(self) -> Result<(), BurnchainError> { self.sql_tx.commit().map_err(BurnchainError::from) } + + pub fn conn(&self) -> &DBConn { + &self.sql_tx + } + + pub fn get_canonical_chain_tip(&self) -> Result { + let qry = "SELECT * FROM burnchain_db_block_headers ORDER BY block_height DESC, block_hash ASC LIMIT 1"; + let opt = query_row(&self.sql_tx, qry, NO_PARAMS)?; + Ok(opt.expect("CORRUPTION: No canonical burnchain tip")) + } + + /// You'd only do this in network emergencies, where node operators are expected to declare an + /// anchor block missing (or present). Ideally there'd be a smart contract somewhere for this. + pub fn set_override_affirmation_map( + &self, + reward_cycle: u64, + affirmation_map: AffirmationMap, + ) -> Result<(), DBError> { + assert_eq!((affirmation_map.len() as u64) + 1, reward_cycle); + let qry = "INSERT INTO overrides (reward_cycle, affirmation_map) VALUES (?1, ?2)"; + let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?, &affirmation_map.encode()]; + + let mut stmt = self.sql_tx.prepare(qry)?; + stmt.execute(args)?; + Ok(()) + } + + pub fn clear_override_affirmation_map(&self, reward_cycle: u64) -> Result<(), DBError> { + let qry = "DELETE FROM overrides WHERE reward_cycle = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + + let mut stmt = self.sql_tx.prepare(qry)?; + stmt.execute(args)?; + Ok(()) + } } impl BurnchainDB { pub fn connect( path: &str, - first_block_height: u64, - first_burn_header_hash: &BurnchainHeaderHash, - first_burn_header_timestamp: u64, + burnchain: &Burnchain, readwrite: bool, ) -> Result { let mut create_flag = false; @@ -219,22 +955,33 @@ impl BurnchainDB { if create_flag { let db_tx = db.tx_begin()?; sql_pragma(&db_tx.sql_tx, 
"PRAGMA journal_mode = WAL;")?; - db_tx.sql_tx.execute_batch(BURNCHAIN_DB_INITIAL_SCHEMA)?; - - db_tx.sql_tx.execute( - "INSERT INTO db_config (version) VALUES (?1)", - &[&BURNCHAIN_DB_VERSION], - )?; + db_tx.sql_tx.execute_batch(BURNCHAIN_DB_SCHEMA)?; let first_block_header = BurnchainBlockHeader { - block_height: first_block_height, - block_hash: first_burn_header_hash.clone(), - timestamp: first_burn_header_timestamp, + block_height: burnchain.first_block_height, + block_hash: burnchain.first_block_hash.clone(), + timestamp: burnchain.first_block_timestamp.into(), num_txs: 0, parent_block_hash: BurnchainHeaderHash::sentinel(), }; db_tx.store_burnchain_db_entry(&first_block_header)?; + + let first_snapshot = BlockSnapshot::initial( + burnchain.first_block_height, + &burnchain.first_block_hash, + burnchain.first_block_timestamp as u64, + ); + let first_snapshot_commit_metadata = BlockCommitMetadata { + burn_block_hash: first_snapshot.burn_header_hash.clone(), + txid: first_snapshot.winning_block_txid.clone(), + block_height: first_snapshot.block_height, + vtxindex: 0, + affirmation_id: 0, + anchor_block: None, + anchor_block_descendant: None, + }; + db_tx.insert_block_commit_metadata(first_snapshot_commit_metadata)?; db_tx.commit()?; } @@ -253,28 +1000,45 @@ impl BurnchainDB { Ok(BurnchainDB { conn }) } - fn tx_begin<'a>(&'a mut self) -> Result, BurnchainError> { + pub fn conn(&self) -> &DBConn { + &self.conn + } + + pub fn tx_begin<'a>(&'a mut self) -> Result, BurnchainError> { let sql_tx = tx_begin_immediate(&mut self.conn)?; Ok(BurnchainDBTransaction { sql_tx: sql_tx }) } - pub fn get_canonical_chain_tip(&self) -> Result { + fn inner_get_canonical_chain_tip( + conn: &DBConn, + ) -> Result { let qry = "SELECT * FROM burnchain_db_block_headers ORDER BY block_height DESC, block_hash ASC LIMIT 1"; + let opt = query_row(conn, qry, NO_PARAMS)?; + Ok(opt.expect("CORRUPTION: No canonical burnchain tip")) + } + + pub fn get_canonical_chain_tip(&self) -> Result { + 
BurnchainDB::inner_get_canonical_chain_tip(&self.conn) + } + + #[cfg(test)] + pub fn get_first_header(&self) -> Result { + let qry = "SELECT * FROM burnchain_db_block_headers ORDER BY block_height ASC, block_hash DESC LIMIT 1"; let opt = query_row(&self.conn, qry, NO_PARAMS)?; Ok(opt.expect("CORRUPTION: No canonical burnchain tip")) } pub fn get_burnchain_block( - &self, + conn: &DBConn, block: &BurnchainHeaderHash, ) -> Result { let block_header_qry = "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ? LIMIT 1"; let block_ops_qry = "SELECT * FROM burnchain_db_block_ops WHERE block_hash = ?"; - let block_header = query_row(&self.conn, block_header_qry, &[block])? + let block_header = query_row(conn, block_header_qry, &[block])? .ok_or_else(|| BurnchainError::UnknownBlock(block.clone()))?; - let block_ops = query_rows(&self.conn, block_ops_qry, &[block])?; + let block_ops = query_rows(conn, block_ops_qry, &[block])?; Ok(BurnchainBlockData { header: block_header, @@ -282,10 +1046,10 @@ impl BurnchainDB { }) } - pub fn get_burnchain_op(&self, txid: &Txid) -> Option { + fn inner_get_burnchain_op(conn: &DBConn, txid: &Txid) -> Option { let qry = "SELECT op FROM burnchain_db_block_ops WHERE txid = ?"; - match query_row(&self.conn, qry, &[txid]) { + match query_row(conn, qry, &[txid]) { Ok(res) => res, Err(e) => { warn!( @@ -297,6 +1061,10 @@ impl BurnchainDB { } } + pub fn get_burnchain_op(&self, txid: &Txid) -> Option { + BurnchainDB::inner_get_burnchain_op(&self.conn, txid) + } + /// Filter out the burnchain block's transactions that could be blockstack transactions. 
/// Return the ordered list of blockstack operations by vtxindex fn get_blockstack_transactions( @@ -337,9 +1105,146 @@ impl BurnchainDB { ops } - pub fn store_new_burnchain_block( + pub fn get_affirmation_map( + conn: &DBConn, + affirmation_id: u64, + ) -> Result, DBError> { + let sql = "SELECT affirmation_map FROM affirmation_maps WHERE affirmation_id = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(affirmation_id)?]; + query_row(conn, sql, args) + } + + pub fn get_affirmation_weight( + conn: &DBConn, + affirmation_id: u64, + ) -> Result, DBError> { + let sql = "SELECT weight FROM affirmation_maps WHERE affirmation_id = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(affirmation_id)?]; + query_row(conn, sql, args) + } + + pub fn get_affirmation_map_id( + conn: &DBConn, + affirmation_map: &AffirmationMap, + ) -> Result, DBError> { + let sql = "SELECT affirmation_id FROM affirmation_maps WHERE affirmation_map = ?1"; + let args: &[&dyn ToSql] = &[&affirmation_map.encode()]; + query_row(conn, sql, args) + } + + pub fn get_affirmation_map_id_at( + conn: &DBConn, + burn_header_hash: &BurnchainHeaderHash, + txid: &Txid, + ) -> Result, DBError> { + let sql = "SELECT affirmation_id FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2"; + let args: &[&dyn ToSql] = &[burn_header_hash, txid]; + query_row(conn, sql, args) + } + + pub fn get_affirmation_map_at( + conn: &DBConn, + burn_header_hash: &BurnchainHeaderHash, + txid: &Txid, + ) -> Result, DBError> { + let am_id_opt = BurnchainDB::get_affirmation_map_id_at(conn, burn_header_hash, txid)?; + match am_id_opt { + Some(am_id) => BurnchainDB::get_affirmation_map(conn, am_id), + None => Ok(None), + } + } + + pub fn get_block_commit_affirmation_id( + conn: &DBConn, + block_commit: &LeaderBlockCommitOp, + ) -> Result, DBError> { + BurnchainDB::get_affirmation_map_id_at( + conn, + &block_commit.burn_header_hash, + &block_commit.txid, + ) + } + + pub fn is_anchor_block( + conn: &DBConn, + burn_header_hash: 
&BurnchainHeaderHash, + txid: &Txid, + ) -> Result { + let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block != ?1 AND burn_block_hash = ?2 AND txid = ?3"; + let args: &[&dyn ToSql] = &[&u64_to_sql(NO_ANCHOR_BLOCK)?, burn_header_hash, txid]; + query_row(conn, sql, args)?.ok_or(DBError::NotFoundError) + } + + pub fn has_anchor_block(conn: &DBConn, reward_cycle: u64) -> Result { + let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + Ok(query_row::(conn, sql, args)?.is_some()) + } + + pub fn get_anchor_block_commit( + conn: &DBConn, + reward_cycle: u64, + ) -> Result, DBError> { + if reward_cycle == NO_ANCHOR_BLOCK { + return Ok(None); + } + + let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let commit_metadata = match query_row::(conn, sql, args)? { + Some(cmt) => cmt, + None => { + return Ok(None); + } + }; + + let commit = BurnchainDB::get_block_commit(conn, &commit_metadata.txid)? + .expect("BUG: no block-commit for block-commit metadata"); + + Ok(Some((commit, commit_metadata))) + } + + pub fn get_block_commit_affirmation_map( + conn: &DBConn, + block_commit: &LeaderBlockCommitOp, + ) -> Result, DBError> { + let am_id = match BurnchainDB::get_block_commit_affirmation_id(conn, block_commit)? 
{ + Some(am_id) => am_id, + None => { + return Ok(None); + } + }; + + BurnchainDB::get_affirmation_map(conn, am_id) + } + + // do NOT call directly; only use in tests + pub fn store_new_burnchain_block_ops_unchecked( + &mut self, + burnchain: &Burnchain, + indexer: &B, + block_header: &BurnchainBlockHeader, + blockstack_ops: &Vec, + ) -> Result<(), BurnchainError> { + let db_tx = self.tx_begin()?; + + test_debug!( + "Store block {},{} with {} ops", + &block_header.block_hash, + block_header.block_height, + blockstack_ops.len() + ); + db_tx.store_burnchain_db_entry(block_header)?; + db_tx.store_blockstack_ops(burnchain, indexer, &block_header, blockstack_ops)?; + + db_tx.commit()?; + Ok(()) + } + + pub fn store_new_burnchain_block( &mut self, burnchain: &Burnchain, + indexer: &B, block: &BurnchainBlock, ) -> Result, BurnchainError> { let header = block.header(); @@ -348,19 +1253,15 @@ impl BurnchainDB { let mut blockstack_ops = self.get_blockstack_transactions(burnchain, block, &header); apply_blockstack_txs_safety_checks(header.block_height, &mut blockstack_ops); - let db_tx = self.tx_begin()?; - - db_tx.store_burnchain_db_entry(&header)?; - db_tx.store_blockstack_ops(&header.block_hash, &blockstack_ops)?; - - db_tx.commit()?; - + self.store_new_burnchain_block_ops_unchecked(burnchain, indexer, &header, &blockstack_ops)?; Ok(blockstack_ops) } #[cfg(test)] - pub fn raw_store_burnchain_block( + pub fn raw_store_burnchain_block( &mut self, + burnchain: &Burnchain, + indexer: &B, header: BurnchainBlockHeader, mut blockstack_ops: Vec, ) -> Result<(), BurnchainError> { @@ -369,30 +1270,302 @@ impl BurnchainDB { let db_tx = self.tx_begin()?; db_tx.store_burnchain_db_entry(&header)?; - db_tx.store_blockstack_ops(&header.block_hash, &blockstack_ops)?; + db_tx.store_blockstack_ops(burnchain, indexer, &header, &blockstack_ops)?; db_tx.commit()?; Ok(()) } + + pub fn get_block_commit( + conn: &DBConn, + txid: &Txid, + ) -> Result, DBError> { + let op = 
BurnchainDB::inner_get_burnchain_op(conn, txid); + if let Some(BlockstackOperationType::LeaderBlockCommit(opdata)) = op { + Ok(Some(opdata)) + } else { + test_debug!("No block-commit tx {}", &txid); + Ok(None) + } + } + + pub fn get_commit_in_block_at( + conn: &DBConn, + header_hash: &BurnchainHeaderHash, + block_ptr: u32, + vtxindex: u16, + ) -> Result, DBError> { + let qry = "SELECT txid FROM block_commit_metadata WHERE block_height = ?1 AND vtxindex = ?2 AND burn_block_hash = ?3"; + let args: &[&dyn ToSql] = &[&block_ptr, &vtxindex, &header_hash]; + let txid = match query_row(&conn, qry, args) { + Ok(Some(txid)) => txid, + Ok(None) => { + test_debug!( + "No block-commit metadata at block {}: {},{}", + &header_hash, + &block_ptr, + &vtxindex + ); + return Ok(None); + } + Err(e) => { + debug!( + "BurnchainDB Error {:?} finding PoX affirmation at {},{} in {:?}", + e, block_ptr, vtxindex, &header_hash + ); + return Ok(None); + } + }; + + BurnchainDB::get_block_commit(conn, &txid) + } + + pub fn get_commit_at( + conn: &DBConn, + indexer: &B, + block_ptr: u32, + vtxindex: u16, + ) -> Result, DBError> { + let header_hash = match indexer + .read_burnchain_headers(block_ptr as u64, (block_ptr + 1) as u64)? 
+ .first() + { + Some(hdr) => hdr.block_hash, + None => { + test_debug!("No headers at height {}", block_ptr); + return Ok(None); + } + }; + + BurnchainDB::get_commit_in_block_at(conn, &header_hash, block_ptr, vtxindex) + } + + pub fn get_commit_metadata( + conn: &DBConn, + burn_block_hash: &BurnchainHeaderHash, + txid: &Txid, + ) -> Result, DBError> { + let args: &[&dyn ToSql] = &[burn_block_hash, txid]; + query_row_panic( + conn, + "SELECT * FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2", + args, + || { + format!( + "BUG: more than one block-commit {},{}", + burn_block_hash, txid + ) + }, + ) + } + + pub fn get_commit_metadata_at( + conn: &DBConn, + indexer: &B, + block_ptr: u32, + vtxindex: u16, + ) -> Result, DBError> { + let header_hash = match indexer + .read_burnchain_headers(block_ptr as u64, (block_ptr + 1) as u64)? + .first() + { + Some(hdr) => hdr.block_hash, + None => { + test_debug!("No headers at height {}", block_ptr); + return Ok(None); + } + }; + + let commit = BurnchainDB::get_commit_in_block_at(conn, &header_hash, block_ptr, vtxindex)? + .expect(&format!( + "BUG: no metadata for stored block-commit {},{},{})", + &header_hash, block_ptr, vtxindex + )); + + BurnchainDB::get_commit_metadata(conn, &header_hash, &commit.txid) + } + + /// Get the block-commit and block metadata for the anchor block with the heaviest affirmation + /// weight. + pub fn get_heaviest_anchor_block( + conn: &DBConn, + ) -> Result, DBError> { + match query_row::( + conn, "SELECT block_commit_metadata.* \ + FROM affirmation_maps JOIN block_commit_metadata ON affirmation_maps.affirmation_id = block_commit_metadata.affirmation_id \ + WHERE block_commit_metadata.anchor_block != ?1 \ + ORDER BY affirmation_maps.weight DESC, block_commit_metadata.anchor_block DESC", + &[&u64_to_sql(NO_ANCHOR_BLOCK)?] + )? { + Some(metadata) => { + let commit = BurnchainDB::get_block_commit(conn, &metadata.txid)? 
+ .expect("BUG: no block commit for existing metadata"); + + Ok(Some((commit, metadata))) + } + None => { + test_debug!("No anchor block affirmations maps"); + Ok(None) + } + } + } + + /// Find the affirmation map of the anchor block whose affirmation map is the heaviest. + /// In the event of a tie, pick the one from the anchor block of the latest reward cycle. + pub fn get_heaviest_anchor_block_affirmation_map( + conn: &DBConn, + burnchain: &Burnchain, + ) -> Result { + match BurnchainDB::get_heaviest_anchor_block(conn)? { + Some((_, metadata)) => { + let last_reward_cycle = burnchain + .block_height_to_reward_cycle(metadata.block_height) + .unwrap_or(0) + + 1; + + // is there an override set for this reward cycle? + if let Some(am) = + BurnchainDB::get_override_affirmation_map(conn, last_reward_cycle)? + { + warn!( + "Overriding heaviest affirmation map for reward cycle {} to {}", + last_reward_cycle, &am + ); + return Ok(am); + } + + let am = BurnchainDB::get_affirmation_map(conn, metadata.affirmation_id)?.expect( + &format!( + "BUG: failed to load affirmation map {}", + metadata.affirmation_id + ), + ); + + if cfg!(test) { + let _weight = + BurnchainDB::get_affirmation_weight(conn, metadata.affirmation_id)?.expect( + &format!( + "BUG: have affirmation map {} but no weight", + &metadata.affirmation_id + ), + ); + + test_debug!( + "Heaviest anchor block affirmation map is {:?} (ID {}, weight {})", + &am, + metadata.affirmation_id, + _weight + ); + } + Ok(am) + } + None => { + test_debug!("No anchor block affirmations maps"); + Ok(AffirmationMap::empty()) + } + } + } + + /// Load an overridden affirmation map. + /// You'd only do this in network emergencies, where node operators are expected to declare an + /// anchor block missing (or present). Ideally there'd be a smart contract somewhere for this. 
+ pub fn get_override_affirmation_map( + conn: &DBConn, + reward_cycle: u64, + ) -> Result, DBError> { + let am_opt: Option = query_row_panic( + conn, + "SELECT affirmation_map FROM overrides WHERE reward_cycle = ?1", + &[&u64_to_sql(reward_cycle)?], + || format!("BUG: more than one override affirmation map for the same reward cycle"), + )?; + if let Some(am) = &am_opt { + assert_eq!((am.len() + 1) as u64, reward_cycle); + } + Ok(am_opt) + } + + /// Get the canonical affirmation map. This is the heaviest anchor block affirmation map, but + /// accounting for any subsequent reward cycles whose anchor blocks either aren't on the + /// heaviest anchor block affirmation map, or which have no anchor blocks. + pub fn get_canonical_affirmation_map( + conn: &DBConn, + burnchain: &Burnchain, + mut unconfirmed_oracle: F, + ) -> Result + where + F: FnMut(LeaderBlockCommitOp, BlockCommitMetadata) -> bool, + { + let canonical_tip = + BurnchainDB::inner_get_canonical_chain_tip(conn).map_err(|e| match e { + BurnchainError::DBError(dbe) => dbe, + _ => DBError::Other(format!("Burnchain error: {:?}", &e)), + })?; + + let last_reward_cycle = burnchain + .block_height_to_reward_cycle(canonical_tip.block_height) + .unwrap_or(0) + + 1; + + // is there an override set for this reward cycle? + if let Some(am) = BurnchainDB::get_override_affirmation_map(conn, last_reward_cycle)? { + warn!( + "Overriding heaviest affirmation map for reward cycle {} to {}", + last_reward_cycle, &am + ); + return Ok(am); + } + + let mut heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(conn, burnchain)?; + let start_rc = (heaviest_am.len() as u64) + 1; + + test_debug!( + "Add reward cycles {}-{} to heaviest anchor block affirmation map {}", + start_rc, + last_reward_cycle, + &heaviest_am + ); + for rc in start_rc..last_reward_cycle { + if let Some((commit, metadata)) = BurnchainDB::get_anchor_block_commit(conn, rc)? 
{ + let present = unconfirmed_oracle(commit, metadata); + if present { + test_debug!("Assume present anchor block at {}", rc); + heaviest_am.push(AffirmationMapEntry::PoxAnchorBlockPresent); + } else { + test_debug!("Assume absent anchor block at {}", rc); + heaviest_am.push(AffirmationMapEntry::PoxAnchorBlockAbsent); + } + } else { + test_debug!("Assume no anchor block at {}", rc); + heaviest_am.push(AffirmationMapEntry::Nothing); + } + } + + Ok(heaviest_am) + } } #[cfg(test)] -mod tests { +pub mod tests { use std::convert::TryInto; + use address::*; use burnchains::bitcoin::address::*; use burnchains::bitcoin::blocks::*; use burnchains::bitcoin::*; use burnchains::PoxConstants; use burnchains::BLOCKSTACK_MAGIC_MAINNET; use chainstate::burn::*; + use chainstate::coordinator::tests::*; use chainstate::stacks::*; use deps::bitcoin::blockdata::transaction::Transaction as BtcTx; use deps::bitcoin::network::serialize::deserialize; use util::hash::*; use crate::types::chainstate::StacksAddress; + use crate::types::chainstate::VRFSeed; use super::*; @@ -401,30 +1574,51 @@ mod tests { deserialize(&tx_bin.to_vec()).unwrap() } + impl BurnchainHeaderReader for Vec { + fn read_burnchain_headers( + &self, + start_height: u64, + end_height: u64, + ) -> Result, DBError> { + if start_height >= self.len() as u64 { + return Ok(vec![]); + } + let end = cmp::min(end_height, self.len() as u64) as usize; + Ok(self[(start_height as usize)..end].to_vec()) + } + + fn get_burnchain_headers_height(&self) -> Result { + Ok(self.len() as u64) + } + } + #[test] fn test_store_and_fetch() { let first_bhh = BurnchainHeaderHash([0; 32]); let first_timestamp = 321; let first_height = 1; - let mut burnchain_db = - BurnchainDB::connect(":memory:", first_height, &first_bhh, first_timestamp, true) - .unwrap(); - let mut burnchain = Burnchain::regtest(":memory:"); burnchain.pox_constants = PoxConstants::test_default(); burnchain.pox_constants.sunset_start = 999; burnchain.pox_constants.sunset_end = 1000; 
+ burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); assert_eq!(&first_block_header.block_hash, &first_bhh); assert_eq!(&first_block_header.block_height, &first_height); - assert_eq!(&first_block_header.timestamp, &first_timestamp); + assert_eq!(first_block_header.timestamp, first_timestamp as u64); assert_eq!( &first_block_header.parent_block_hash, &BurnchainHeaderHash::sentinel() ); + let headers = vec![first_block_header.clone()]; let canon_hash = BurnchainHeaderHash([1; 32]); let canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( @@ -435,7 +1629,7 @@ mod tests { 485, )); let ops = burnchain_db - .store_new_burnchain_block(&burnchain, &canonical_block) + .store_new_burnchain_block(&burnchain, &headers, &canonical_block) .unwrap(); assert_eq!(ops.len(), 0); @@ -473,7 +1667,7 @@ mod tests { )); let ops = burnchain_db - .store_new_burnchain_block(&burnchain, &non_canonical_block) + .store_new_burnchain_block(&burnchain, &headers, &non_canonical_block) .unwrap(); assert_eq!(ops.len(), expected_ops.len()); for op in ops.iter() { @@ -489,7 +1683,7 @@ mod tests { } let BurnchainBlockData { header, ops } = - burnchain_db.get_burnchain_block(&non_canon_hash).unwrap(); + BurnchainDB::get_burnchain_block(&burnchain_db.conn, &non_canon_hash).unwrap(); assert_eq!(ops.len(), expected_ops.len()); for op in ops.iter() { let expected_op = expected_ops @@ -508,7 +1702,7 @@ mod tests { assert_eq!(&looked_up_canon, &canonical_block.header()); let BurnchainBlockData { header, ops } = - burnchain_db.get_burnchain_block(&canon_hash).unwrap(); + BurnchainDB::get_burnchain_block(&burnchain_db.conn, &canon_hash).unwrap(); assert_eq!(ops.len(), 0); assert_eq!(&header, &looked_up_canon); } @@ -519,25 +1713,28 @@ mod tests { 
let first_timestamp = 321; let first_height = 1; - let mut burnchain_db = - BurnchainDB::connect(":memory:", first_height, &first_bhh, first_timestamp, true) - .unwrap(); - let mut burnchain = Burnchain::regtest(":memory:"); burnchain.pox_constants = PoxConstants::test_default(); burnchain.pox_constants.sunset_start = 999; burnchain.pox_constants.sunset_end = 1000; + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); assert_eq!(&first_block_header.block_hash, &first_bhh); assert_eq!(&first_block_header.block_height, &first_height); - assert_eq!(&first_block_header.timestamp, &first_timestamp); + assert_eq!(first_block_header.timestamp, first_timestamp as u64); assert_eq!( &first_block_header.parent_block_hash, &BurnchainHeaderHash::sentinel() ); let canon_hash = BurnchainHeaderHash([1; 32]); + let mut headers = vec![first_block_header.clone()]; let canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( 500, @@ -547,7 +1744,7 @@ mod tests { 485, )); let ops = burnchain_db - .store_new_burnchain_block(&burnchain, &canonical_block) + .store_new_burnchain_block(&burnchain, &headers, &canonical_block) .unwrap(); assert_eq!(ops.len(), 0); @@ -689,6 +1886,14 @@ mod tests { 350, )); + headers.push(BurnchainBlockHeader { + block_height: first_block_header.block_height + 1, + block_hash: block_hash_0.clone(), + parent_block_hash: first_bhh.clone(), + num_txs: ops_0.len() as u64, + timestamp: first_block_header.timestamp + 1, + }); + let block_1 = BurnchainBlock::Bitcoin(BitcoinBlock::new( block_height_1, &block_hash_1, @@ -697,8 +1902,16 @@ mod tests { 360, )); + headers.push(BurnchainBlockHeader { + block_height: first_block_header.block_height + 2, + block_hash: block_hash_1.clone(), + parent_block_hash: 
block_hash_0.clone(), + num_txs: ops_1.len() as u64, + timestamp: first_block_header.timestamp + 2, + }); + let processed_ops_0 = burnchain_db - .store_new_burnchain_block(&burnchain, &block_0) + .store_new_burnchain_block(&burnchain, &headers, &block_0) .unwrap(); assert_eq!( @@ -708,7 +1921,7 @@ mod tests { ); let processed_ops_1 = burnchain_db - .store_new_burnchain_block(&burnchain, &block_1) + .store_new_burnchain_block(&burnchain, &headers, &block_1) .unwrap(); assert_eq!( @@ -744,4 +1957,166 @@ mod tests { panic!("EXPECTED to parse a stack stx op"); } } + + pub fn make_simple_block_commit( + burnchain: &Burnchain, + parent: Option<&LeaderBlockCommitOp>, + burn_header: &BurnchainBlockHeader, + block_hash: BlockHeaderHash, + ) -> LeaderBlockCommitOp { + let block_height = burn_header.block_height; + let mut new_op = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: block_hash, + new_seed: VRFSeed([1u8; 32]), + parent_block_ptr: 0, + parent_vtxindex: 0, + key_block_ptr: 0, + key_vtxindex: 0, + memo: vec![0], + + commit_outs: vec![ + StacksAddress { + version: 26, + bytes: Hash160::empty(), + }, + StacksAddress { + version: 26, + bytes: Hash160::empty(), + }, + ], + + burn_fee: 10000, + input: (next_txid(), 0), + apparent_sender: BurnchainSigner { + public_keys: vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + num_sigs: 1, + hash_mode: AddressHashMode::SerializeP2PKH, + }, + + txid: next_txid(), + vtxindex: 0, + block_height: block_height, + burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: burn_header.block_hash.clone(), + }; + + if burnchain.is_in_prepare_phase(block_height) { + new_op.commit_outs = vec![StacksAddress { + version: 26, + bytes: Hash160::empty(), + }]; + } + + if let Some(ref op) = parent { + new_op.parent_block_ptr = op.block_height as u32; + new_op.parent_vtxindex = op.vtxindex as u16; + }; + + new_op + } + + 
#[test] + fn test_get_commit_at() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 1; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let mut parent = None; + let mut parent_block_header: Option = None; + let mut cmts = vec![]; + + for i in 0..5 { + let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); + let block_header = BurnchainBlockHeader { + block_height: (first_height + i) as u64, + block_hash: hdr, + parent_block_hash: parent_block_header + .as_ref() + .map(|blk| blk.block_hash.clone()) + .unwrap_or(first_block_header.block_hash.clone()), + num_txs: 1, + timestamp: i as u64, + }; + + headers.push(block_header.clone()); + parent_block_header = Some(block_header); + } + + for i in 0..5 { + let block_header = &headers[i + 1]; + + let cmt = make_simple_block_commit( + &burnchain, + parent.as_ref(), + block_header, + BlockHeaderHash([((i + 1) as u8) | 0x80; 32]), + ); + burnchain_db + .store_new_burnchain_block_ops_unchecked( + &burnchain, + &headers, + block_header, + &vec![BlockstackOperationType::LeaderBlockCommit(cmt.clone())], + ) + .unwrap(); + + cmts.push(cmt.clone()); + parent = Some(cmt); + } + + for i in 0..5 { + let cmt = BurnchainDB::get_commit_at( + &burnchain_db.conn(), + &headers, + (first_height + i) as u32, + 0, + ) + .unwrap() + .unwrap(); + assert_eq!(cmt, cmts[i as usize]); + } + + let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), &headers, 5, 0) + .unwrap() + .unwrap(); + assert_eq!(cmt, cmts[4]); + + // fork off the last stored commit block 
+ let fork_hdr = BurnchainHeaderHash([90 as u8; 32]); + let fork_block_header = BurnchainBlockHeader { + block_height: 4, + block_hash: fork_hdr, + parent_block_hash: BurnchainHeaderHash([5 as u8; 32]), + num_txs: 0, + timestamp: 4 as u64, + }; + + burnchain_db + .store_new_burnchain_block_ops_unchecked( + &burnchain, + &headers, + &fork_block_header, + &vec![], + ) + .unwrap(); + headers[4] = fork_block_header; + + let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), &headers, 4, 0).unwrap(); + assert!(cmt.is_none()); + } } From 5d8b06b1d02bdf746931fbc7dadf223baba307b4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:41:15 -0400 Subject: [PATCH 009/116] feat: add a few more methods to the sortition DB to allow the chains coordinator to search for multiple histories of sortitions with differnent PoX IDs, and revalidate previously-inavlidated sortitions should the canonical PoX ID change. --- src/chainstate/burn/db/sortdb.rs | 135 +++++++++++++++++++++++++------ 1 file changed, 111 insertions(+), 24 deletions(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 38a7b1b81f..2fbadd0b05 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -111,6 +111,12 @@ impl FromRow for SortitionId { } } +impl FromRow for ConsensusHash { + fn from_row<'a>(row: &'a Row) -> Result { + ConsensusHash::from_column(row, "consensus_hash") + } +} + impl FromRow for MissedBlockCommit { fn from_row<'a>(row: &'a Row) -> Result { let intended_sortition = SortitionId::from_column(row, "intended_sortition_id")?; @@ -483,6 +489,8 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ "CREATE INDEX snapshots_block_winning_hash ON snapshots(winning_stacks_block_hash);", "CREATE INDEX block_arrivals ON snapshots(arrival_index,burn_header_hash);", "CREATE INDEX arrival_indexes ON snapshots(arrival_index);", + "CREATE INDEX stacks_block_acceptance ON snapshots(stacks_block_accepted);", + 
"CREATE INDEX stacks_block_height ON snapshots(stacks_block_height);", r#" CREATE TABLE snapshot_transition_ops( sortition_id TEXT PRIMARY KEY, @@ -1780,7 +1788,7 @@ impl<'a> SortitionHandleConn<'a> { let prepare_end_sortid = self.get_sortition_id_for_bhh(prepare_end_bhh)? .ok_or_else(|| { - warn!("Missing parent"; "burn_header_hash" => %prepare_end_bhh); + warn!("Missing parent"; "burn_header_hash" => %prepare_end_bhh, "sortition_tip" => %&self.context.chain_tip); BurnchainError::MissingParentBlock })?; let block_height = SortitionDB::get_block_height(self.deref(), &prepare_end_sortid)? @@ -2243,6 +2251,30 @@ impl SortitionDB { let qry = "SELECT * FROM snapshots ORDER BY block_height ASC"; query_rows(self.conn(), qry, NO_PARAMS) } + + pub fn get_all_snapshots_for_burn_block( + conn: &DBConn, + bhh: &BurnchainHeaderHash, + ) -> Result, db_error> { + let qry = "SELECT * FROM snapshots WHERE burn_header_hash = ?1"; + query_rows(conn, qry, &[bhh]) + } + + /// Get the height of a consensus hash, even if it's not on the canonical PoX fork. 
+ pub fn get_consensus_hash_height(&self, ch: &ConsensusHash) -> Result, db_error> { + let qry = "SELECT block_height FROM snapshots WHERE consensus_hash = ?1"; + let mut heights: Vec = query_rows(self.conn(), qry, &[ch])?; + if let Some(height) = heights.pop() { + for next_height in heights { + if height != next_height { + panic!("BUG: consensus hash {} has two different heights", ch); + } + } + Ok(Some(height)) + } else { + Ok(None) + } + } } impl<'a> SortitionDBConn<'a> { @@ -2348,25 +2380,42 @@ impl<'a> SortitionDBConn<'a> { Ok(ret) } - /// Get the height of a burnchain block - pub fn inner_get_burn_block_height( + pub fn find_parent_snapshot_for_stacks_block( &self, - burn_header_hash: &BurnchainHeaderHash, - ) -> Result, db_error> { - let qry = "SELECT block_height FROM snapshots WHERE burn_header_hash = ?1 LIMIT 1"; - query_row(self.conn(), qry, &[burn_header_hash]) - } + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + ) -> Result, db_error> { + let db_handle = SortitionHandleConn::open_reader_consensus(self, consensus_hash)?; + let parent_block_snapshot = match db_handle + .get_block_snapshot_of_parent_stacks_block(consensus_hash, &block_hash) + { + Ok(Some((_, sn))) => { + debug!( + "Parent of {}/{} is {}/{}", + consensus_hash, block_hash, sn.consensus_hash, sn.winning_stacks_block_hash + ); + sn + } + Ok(None) => { + debug!( + "Received block with unknown parent snapshot: {}/{}", + consensus_hash, block_hash, + ); + return Ok(None); + } + Err(db_error::InvalidPoxSortition) => { + warn!( + "Received block {}/{} on a non-canonical PoX sortition", + consensus_hash, block_hash, + ); + return Ok(None); + } + Err(e) => { + return Err(e); + } + }; - /// Get the burnchain hash given a height - pub fn inner_get_burn_header_hash( - &self, - height: u32, - ) -> Result, db_error> { - let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn())?; - let ancestor_opt = - SortitionDB::get_ancestor_snapshot(&self, height as u64, &tip.sortition_id)? 
- .map(|snapshot| snapshot.burn_header_hash); - Ok(ancestor_opt) + Ok(Some(parent_block_snapshot)) } } @@ -2427,30 +2476,53 @@ impl SortitionDB { .flatten() } - pub fn invalidate_descendants_of( + pub fn revalidate_snapshot( + tx: &SortitionDBTx, + sortition_id: &SortitionId, + ) -> Result<(), BurnchainError> { + tx.tx().execute( + "UPDATE snapshots SET pox_valid = 1 WHERE sortition_id = ?1", + &[sortition_id], + )?; + Ok(()) + } + + pub fn invalidate_descendants_with_closure( &mut self, burn_block: &BurnchainHeaderHash, - ) -> Result<(), BurnchainError> { + mut cls: F, + ) -> Result<(), BurnchainError> + where + F: FnMut(&SortitionDBTx, &BurnchainHeaderHash, &Vec) -> (), + { let db_tx = self.tx_begin()?; let mut queue = vec![burn_block.clone()]; while let Some(header) = queue.pop() { - db_tx.tx().execute( - "UPDATE snapshots SET pox_valid = 0 WHERE parent_burn_header_hash = ?", - &[&header], - )?; let mut stmt = db_tx.prepare( "SELECT DISTINCT burn_header_hash FROM snapshots WHERE parent_burn_header_hash = ?", )?; for next_header in stmt.query_map(&[&header], |row| row.get(0))? { queue.push(next_header?); } + cls(&db_tx, &header, &queue); + db_tx.tx().execute( + "UPDATE snapshots SET pox_valid = 0 WHERE parent_burn_header_hash = ?", + &[&header], + )?; } db_tx.commit()?; Ok(()) } + pub fn invalidate_descendants_of( + &mut self, + burn_block: &BurnchainHeaderHash, + ) -> Result<(), BurnchainError> { + self.invalidate_descendants_with_closure(burn_block, |_tx, _bhh, _queue| {}) + } + /// Get the last sortition in the prepare phase that chose a particular Stacks block as the anchor, /// or if the anchor is not expected, return None pub fn get_prepare_end_for( @@ -3031,6 +3103,8 @@ impl SortitionDB { query_rows(conn, qry, args) } + /// Get the vtxindex of the winning sortition. + /// The sortition may not be valid. 
pub fn get_block_winning_vtxindex( conn: &Connection, sortition: &SortitionId, @@ -3287,6 +3361,19 @@ impl SortitionDB { ]; query_row(conn, sql, args) } + + /// Get all sortition IDs at the given burnchain block height (including ones that aren't on + /// the canonical PoX fork) + pub fn get_sortition_ids_at_height( + conn: &DBConn, + height: u64, + ) -> Result, db_error> { + query_rows( + conn, + "SELECT sortition_id FROM snapshots WHERE block_height = ?1", + &[&u64_to_sql(height)?], + ) + } } impl<'a> SortitionHandleTx<'a> { From b1bfabde16876564fe5b70becaf896ac58b0883b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:42:20 -0400 Subject: [PATCH 010/116] refactoring: use ..BlockSnapshot::initial() to populate BlockSnapshot instances in tests with defaults --- src/chainstate/burn/operations/leader_block_commit.rs | 3 ++- src/chainstate/burn/operations/leader_key_register.rs | 1 + src/chainstate/burn/operations/user_burn_support.rs | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index 720f14a05f..0b12404306 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -153,7 +153,7 @@ impl LeaderBlockCommitOp { } } - fn burn_block_mined_at(&self) -> u64 { + pub fn burn_block_mined_at(&self) -> u64 { self.burn_parent_modulus as u64 % BURN_BLOCK_MINED_AT_MODULUS } @@ -1669,6 +1669,7 @@ mod tests { canonical_stacks_tip_height: 0, canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) }; let mut tx = SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); diff --git a/src/chainstate/burn/operations/leader_key_register.rs b/src/chainstate/burn/operations/leader_key_register.rs index 9bd581489a..ce49d4c066 100644 --- 
a/src/chainstate/burn/operations/leader_key_register.rs +++ b/src/chainstate/burn/operations/leader_key_register.rs @@ -613,6 +613,7 @@ pub mod tests { canonical_stacks_tip_height: 0, canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) }; let mut tx = SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); diff --git a/src/chainstate/burn/operations/user_burn_support.rs b/src/chainstate/burn/operations/user_burn_support.rs index c31cee0127..c8d59d4782 100644 --- a/src/chainstate/burn/operations/user_burn_support.rs +++ b/src/chainstate/burn/operations/user_burn_support.rs @@ -625,6 +625,7 @@ mod tests { canonical_stacks_tip_height: 0, canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) }; let mut tx = SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); From 365e10ca2e8fcda488e9525befc8cde8d27f875c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:43:16 -0400 Subject: [PATCH 011/116] feat: make it so that previously-orphaned stacks block data can be forgotten, so a previously-unprocessable Stacks block can become processable again (i.e. in the event of a PoX reorg). In addition, remove a race condition in the invalid-block-deletion logic by moving the block file *and then* truncating it. --- src/chainstate/stacks/db/blocks.rs | 149 ++++++++++++++++++++++------- 1 file changed, 114 insertions(+), 35 deletions(-) diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index c4cc97e70d..1a7ae8245f 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -661,6 +661,7 @@ impl StacksChainState { } /// Do we have a stored a block in the chunk store? + /// Will be true even if it's invalid. 
pub fn has_block_indexed( blocks_dir: &String, index_block_hash: &StacksBlockId, @@ -678,6 +679,25 @@ impl StacksChainState { } } + /// Do we have a stored a block in the chunk store? + /// Will be true only if it's also valid (i.e. non-zero sized) + pub fn has_valid_block_indexed( + blocks_dir: &String, + index_block_hash: &StacksBlockId, + ) -> Result { + let block_path = StacksChainState::get_index_block_path(blocks_dir, index_block_hash)?; + match fs::metadata(block_path) { + Ok(md) => Ok(md.len() > 0), + Err(e) => { + if e.kind() == io::ErrorKind::NotFound { + Ok(false) + } else { + Err(Error::DBError(db_error::IOError(e))) + } + } + } + } + /// Have we processed and stored a particular block? pub fn has_stored_block( blocks_db: &DBConn, @@ -685,22 +705,33 @@ impl StacksChainState { consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result { - let staging_status = - StacksChainState::has_staging_block(blocks_db, consensus_hash, block_hash)?; + let staging_status_opt = + StacksChainState::get_staging_block_status(blocks_db, consensus_hash, block_hash)? + .map(|processed| !processed); + let index_block_hash = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - if staging_status { - // not processed yet - test_debug!( - "Block {}/{} ({}) is staging", - consensus_hash, - block_hash, - &index_block_hash - ); - return Ok(false); + match staging_status_opt { + Some(staging_status) => { + if staging_status { + // not processed yet + test_debug!( + "Block {}/{} ({}) is staging", + consensus_hash, + block_hash, + &index_block_hash + ); + Ok(false) + } else { + // have a row in the DB at least. + // only accepted if we stored it + StacksChainState::has_block_indexed(blocks_dir, &index_block_hash) + } + } + None => { + // no row in the DB, so not processet at all. 
+ Ok(false) + } } - - // only accepted if we stored it - StacksChainState::has_block_indexed(blocks_dir, &index_block_hash) } /// Store a block to the chunk store, named by its hash @@ -745,29 +776,33 @@ impl StacksChainState { StacksChainState::make_block_dir(blocks_dir, consensus_hash, &block_header_hash) .expect("FATAL: failed to create block directory"); + // try make this thread-safe. It's okay if this block gets copied more than once; we + // only care that at least one copy survives for further analysis. + let random_bytes = thread_rng().gen::<[u8; 8]>(); + let random_bytes_str = to_hex(&random_bytes); + let index_block_hash = StacksBlockId::new(consensus_hash, block_header_hash); + let mut invalid_path = + StacksChainState::get_index_block_pathbuf(blocks_dir, &index_block_hash); + invalid_path + .file_name() + .expect("FATAL: index block path did not have file name"); + invalid_path.set_extension(&format!("invalid-{}", &random_bytes_str)); + + fs::copy(&block_path, &invalid_path).expect(&format!( + "FATAL: failed to copy '{}' to '{}'", + &block_path, + &invalid_path.to_string_lossy(), + )); + // already freed? - let sz = StacksChainState::get_file_size(&block_path) - .expect(&format!("FATAL: failed to stat {}", &block_path)); + let sz = fs::metadata(&invalid_path) + .expect(&format!( + "FATAL: failed to stat '{}'", + &invalid_path.to_string_lossy() + )) + .len(); if sz > 0 { - // try make this thread-safe. It's okay if this block gets copied more than once; we - // only care that at least one copy survives for further analysis. 
- let random_bytes = thread_rng().gen::<[u8; 8]>(); - let random_bytes_str = to_hex(&random_bytes); - let index_block_hash = StacksBlockId::new(consensus_hash, block_header_hash); - let mut invalid_path = - StacksChainState::get_index_block_pathbuf(blocks_dir, &index_block_hash); - invalid_path - .file_name() - .expect("FATAL: index block path did not have file name"); - invalid_path.set_extension(&format!("invalid-{}", &random_bytes_str)); - - fs::copy(&block_path, &invalid_path).expect(&format!( - "FATAL: failed to copy '{}' to '{}'", - &block_path, - &invalid_path.to_string_lossy(), - )); - // truncate the original fs::OpenOptions::new() .read(false) @@ -1791,6 +1826,19 @@ impl StacksChainState { }) } + /// Do we have a given Stacks block in any PoX fork or sortition fork? + pub fn get_staging_block_consensus_hashes( + blocks_conn: &DBConn, + block_hash: &BlockHeaderHash, + ) -> Result, Error> { + query_rows::( + blocks_conn, + "SELECT consensus_hash FROM staging_blocks WHERE anchored_block_hash = ?1", + &[block_hash], + ) + .map_err(|e| e.into()) + } + /// Is a block orphaned? pub fn is_block_orphaned( blocks_conn: &DBConn, @@ -2106,6 +2154,34 @@ impl StacksChainState { Ok(()) } + /// Forget that a block and microblock stream was marked as invalid, given a particular consensus hash. + /// This is necessary when dealing with PoX reorgs, whereby an epoch can be unprocessible on one + /// fork but processable on another (i.e. the same block can show up in two different PoX + /// forks, but will only be valid in at most one of them). + /// This does not restore any block data; it merely makes it possible to go re-process them. 
+ pub fn forget_orphaned_epoch_data<'a>( + tx: &mut DBTx<'a>, + consensus_hash: &ConsensusHash, + anchored_block_hash: &BlockHeaderHash, + ) -> Result<(), Error> { + test_debug!( + "Forget that {}/{} is orphaned, if it is orphaned at all", + consensus_hash, + anchored_block_hash + ); + + let sql = "DELETE FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1 AND processed = 1"; + let args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + + tx.execute(sql, args)?; + + let sql = "DELETE FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1 AND processed = 1"; + + tx.execute(sql, args)?; + + Ok(()) + } + /// Clear out a staging block -- mark it as processed. /// Mark its children as attachable. /// Idempotent. @@ -3255,7 +3331,7 @@ impl StacksChainState { &index_block_hash ); return Ok(false); - } else if StacksChainState::has_block_indexed(&blocks_path, &index_block_hash)? { + } else if StacksChainState::has_valid_block_indexed(&blocks_path, &index_block_hash)? { debug!( "Block already stored to chunk store: {}/{} ({})", consensus_hash, @@ -4784,6 +4860,9 @@ impl StacksChainState { None => { // no more work to do! debug!("No staging blocks"); + + // save any orphaning we did + chainstate_tx.commit().map_err(Error::DBError)?; return Ok((None, None)); } }; From f5da764b8872272e6cd7c9d41d4229ec4f9df9ee Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:44:28 -0400 Subject: [PATCH 012/116] feat: fix #1805 by implementing Nakamoto consensus on the history of anchor blocks. The canonical Stacks fork must pass through the longest history of anchor blocks (by number of anchor blocks and empty reward cycles). 
Use anchor block affirmation maps to identify and track the heaviest anchor block history, and if the heaviest affirmation map changes, invalidate sortitions and reprocess them, but this time, use the new heaviest affirmation map to deduce which anchor blocks *must exist*. This not only makes it possible to reorg the Stacks blockchain if the network loses an anchor block, but also makes the act of re-affirming an anchor block N reward cycles ago *at least as hard as* mining N+1 new reward cycles. --- src/chainstate/coordinator/mod.rs | 980 +++++++++++++++++++++++++++--- 1 file changed, 898 insertions(+), 82 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 88623e508f..5b92d1ee08 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -14,18 +14,27 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::{HashMap, HashSet, VecDeque}; +use std::cmp; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::convert::{TryFrom, TryInto}; use std::sync::mpsc::SyncSender; use std::time::Duration; use burnchains::{ - db::{BurnchainBlockData, BurnchainDB}, - Address, Burnchain, BurnchainBlockHeader, Error as BurnchainError, Txid, + affirmation::{AffirmationMap, AffirmationMapEntry}, + bitcoin::indexer::BitcoinIndexer, + db::{ + BlockCommitMetadata, BurnchainBlockData, BurnchainDB, BurnchainDBTransaction, + BurnchainHeaderReader, + }, + Address, Burnchain, BurnchainBlockHeader, Error as BurnchainError, PoxConstants, Txid, }; use chainstate::burn::{ - db::sortdb::SortitionDB, operations::leader_block_commit::RewardSetInfo, - operations::BlockstackOperationType, BlockSnapshot, ConsensusHash, + db::sortdb::SortitionDB, + operations::leader_block_commit::{RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS}, + operations::BlockstackOperationType, + operations::LeaderBlockCommitOp, + BlockSnapshot, 
ConsensusHash, }; use chainstate::coordinator::comm::{ ArcCounterCoordinatorNotices, CoordinatorEvents, CoordinatorNotices, CoordinatorReceivers, @@ -34,7 +43,7 @@ use chainstate::stacks::index::MarfTrieId; use chainstate::stacks::{ db::{ accounts::MinerReward, ChainStateBootData, ClarityTx, MinerRewardInfo, StacksChainState, - StacksHeaderInfo, + StacksEpochReceipt, StacksHeaderInfo, }, events::{StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin}, Error as ChainstateError, StacksBlock, TransactionPayload, @@ -44,13 +53,18 @@ use monitoring::{ update_stacks_tip_height, }; use net::atlas::{AtlasConfig, AttachmentInstance}; +use util::db::DBConn; +use util::db::DBTx; use util::db::Error as DBError; +use util::get_epoch_time_secs; use vm::{ costs::ExecutionCost, types::{PrincipalData, QualifiedContractIdentifier}, Value, }; +use core::StacksEpochId; + use crate::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, StacksBlockHeader, StacksBlockId, @@ -65,7 +79,7 @@ pub mod tests; /// The 3 different states for the current /// reward cycle's relationship to its PoX anchor -#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum PoxAnchorBlockStatus { SelectedAndKnown(BlockHeaderHash, Vec), SelectedAndUnknown(BlockHeaderHash), @@ -147,6 +161,7 @@ pub struct ChainsCoordinator< canonical_sortition_tip: Option, canonical_chain_tip: Option, canonical_pox_id: Option, + heaviest_anchor_block_affirmation_map: Option, burnchain_blocks_db: BurnchainDB, chain_state_db: StacksChainState, sortition_db: SortitionDB, @@ -270,6 +285,12 @@ impl<'a, T: BlockEventDispatcher> let canonical_sortition_tip = SortitionDB::get_canonical_sortition_tip(sortition_db.conn()).unwrap(); + let heaviest_am = BurnchainDB::get_heaviest_anchor_block_affirmation_map( + burnchain_blocks_db.conn(), + &burnchain, + ) + .unwrap(); + let arc_notices = ArcCounterCoordinatorNotices { stacks_blocks_processed, sortitions_processed, @@ 
-279,6 +300,7 @@ impl<'a, T: BlockEventDispatcher> canonical_chain_tip: None, canonical_sortition_tip: Some(canonical_sortition_tip), canonical_pox_id: None, + heaviest_anchor_block_affirmation_map: Some(heaviest_am), burnchain_blocks_db, chain_state_db, sortition_db, @@ -342,10 +364,17 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider> ChainsCoordinator<'a, T, let canonical_sortition_tip = SortitionDB::get_canonical_sortition_tip(sortition_db.conn()).unwrap(); + let heaviest_am = BurnchainDB::get_heaviest_anchor_block_affirmation_map( + burnchain_blocks_db.conn(), + &burnchain, + ) + .unwrap(); + ChainsCoordinator { canonical_chain_tip: None, canonical_sortition_tip: Some(canonical_sortition_tip), canonical_pox_id: None, + heaviest_anchor_block_affirmation_map: Some(heaviest_am), burnchain_blocks_db, chain_state_db, sortition_db, @@ -411,6 +440,7 @@ pub fn get_reward_cycle_info( ic.get_chosen_pox_anchor(&parent_bhh, &burnchain.pox_constants) }?; if let Some((consensus_hash, stacks_block_hash)) = reward_cycle_info { + // it may have been elected, but we only process it if it's affirmed by the network! 
info!("Anchor block selected: {}", stacks_block_hash); let anchor_block_known = StacksChainState::is_stacks_block_processed( &chain_state.db(), @@ -427,8 +457,18 @@ pub fn get_reward_cycle_info( sort_db, &block_id, )?; + test_debug!( + "Stacks anchor block {}/{} is processed", + &consensus_hash, + &stacks_block_hash + ); PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, reward_set) } else { + test_debug!( + "Stacks anchor block {}/{} is NOT processed", + &consensus_hash, + &stacks_block_hash + ); PoxAnchorBlockStatus::SelectedAndUnknown(stacks_block_hash) }; Ok(Some(RewardCycleInfo { anchor_status })) @@ -497,20 +537,557 @@ fn dispatcher_announce_burn_ops( ); } +fn forget_orphan_stacks_blocks( + sort_conn: &DBConn, + chainstate_db_tx: &mut DBTx, + burn_header: &BurnchainHeaderHash, + invalidation_height: u64, +) { + if let Ok(sns) = SortitionDB::get_all_snapshots_for_burn_block(&sort_conn, &burn_header) { + for sn in sns.into_iter() { + // only retry blocks that are truly in descendant + // sortitions. + if sn.sortition && sn.block_height > invalidation_height { + if let Err(e) = StacksChainState::forget_orphaned_epoch_data( + chainstate_db_tx, + &sn.consensus_hash, + &sn.winning_stacks_block_hash, + ) { + warn!( + "Failed to forget that {}/{} is orphaned: {:?}", + &sn.consensus_hash, &sn.winning_stacks_block_hash, &e + ); + } + } + } + } +} + impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> ChainsCoordinator<'a, T, N, U> { - pub fn handle_new_stacks_block(&mut self) -> Result<(), Error> { + pub fn handle_new_stacks_block(&mut self) -> Result, Error> { if let Some(pox_anchor) = self.process_ready_blocks()? { self.process_new_pox_anchor(pox_anchor) } else { - Ok(()) + Ok(None) + } + } + + /// Get all block snapshots and their PoX IDs at a given burnchain block height. 
+    fn get_snapshots_and_pox_ids_at_height(
+        &mut self,
+        height: u64,
+    ) -> Result<Vec<(BlockSnapshot, PoxId)>, Error> {
+        let sort_ids = SortitionDB::get_sortition_ids_at_height(self.sortition_db.conn(), height)?;
+        let ic = self.sortition_db.index_conn();
+
+        let mut ret = Vec::with_capacity(sort_ids.len());
+
+        for sort_id in sort_ids.iter() {
+            let handle = ic.as_handle(sort_id);
+
+            let sn = SortitionDB::get_block_snapshot(&self.sortition_db.conn(), sort_id)?
+                .expect("BUG: have sortition ID without snapshot");
+
+            let pox_id = handle.get_pox_id()?;
+            ret.push((sn, pox_id));
+        }
+
+        Ok(ret)
+    }
+
+    fn handle_affirmation_reorg(&mut self) -> Result<(), Error> {
+        let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?;
+        let heaviest_am = BurnchainDB::get_heaviest_anchor_block_affirmation_map(
+            self.burnchain_blocks_db.conn(),
+            &self.burnchain,
+        )?;
+        debug!(
+            "Heaviest anchor block affirmation map is {} at height {}, current is {:?}",
+            &heaviest_am,
+            canonical_burnchain_tip.block_height,
+            &self.heaviest_anchor_block_affirmation_map
+        );
+
+        // did the canonical affirmation map change?
+        if let Some(heaviest_am_before) = self.heaviest_anchor_block_affirmation_map.take() {
+            if let Some(changed_reward_cycle) = heaviest_am.find_divergence(&heaviest_am_before) {
+                let current_reward_cycle = self
+                    .burnchain
+                    .block_height_to_reward_cycle(canonical_burnchain_tip.block_height)
+                    .unwrap_or(0);
+                if changed_reward_cycle < current_reward_cycle {
+                    info!("Heaviest anchor block affirmation map changed from {} to {} in reward cycle {}", &heaviest_am_before, &heaviest_am, current_reward_cycle);
+
+                    let affirmation_pox_id = heaviest_am.as_pox_id();
+                    test_debug!(
+                        "PoxId of new affirmation map {:?} is {}",
+                        &heaviest_am,
+                        &affirmation_pox_id
+                    );
+
+                    // find the lowest reward cycle we have to reprocess (which starts at burn
+                    // block rc_start_block).
+ + // burn chain height at which we'll invalidate *all* sortitions + let mut last_invalidate_start_block = 0; + + // burn chain height at which we'll re-try orphaned Stacks blocks, and + // revalidate the sortitions that were previously invalid but have now been + // made valid + let mut first_invalidate_start_block = 0; + + // set of sortition IDs that are currently invalid, but will need to be reset + // as valid + let mut valid_sortition_ids = vec![]; + + let mut diverged = false; + for rc in changed_reward_cycle..current_reward_cycle { + last_invalidate_start_block = + self.burnchain.reward_cycle_to_block_height(rc); + first_invalidate_start_block = last_invalidate_start_block; + + // + 1 because the first sortition of a reward cycle is congruent to 1 mod + // reward_cycle_length. + let sort_ids = SortitionDB::get_sortition_ids_at_height( + self.sortition_db.conn(), + last_invalidate_start_block + 1, + )?; + + // find the sortition ID with the shortest PoX bitvector that is NOT a prefix + // of the canonical affirmation map's PoX bitvector. + let mut found_diverged = false; + for sort_id in sort_ids.iter() { + let ic = self.sortition_db.index_conn(); + let handle = ic.as_handle(sort_id); + + let pox_id = handle.get_pox_id()?; + test_debug!( + "Compare {} as prefix of {}?", + &pox_id, + &affirmation_pox_id + ); + if affirmation_pox_id.has_prefix(&pox_id) { + continue; + } + + // pox_id is NOT a prefix of affirmation_pox_id, but maybe it's only + // different by the last bit? + let prior_affirmation_pox_id = PoxId::new( + affirmation_pox_id.clone().into_inner() + [0..(affirmation_pox_id.len().saturating_sub(1))] + .to_vec(), + ); + let prior_pox_id = PoxId::new( + pox_id.clone().into_inner()[0..(pox_id.len().saturating_sub(1))] + .to_vec(), + ); + + if prior_affirmation_pox_id.has_prefix(&prior_pox_id) { + // this is the first reward cycle where history diverged. 
+                                found_diverged = true;
+                                test_debug!("{} diverges from {}", &pox_id, affirmation_pox_id);
+
+                                // careful -- we might have already processed sortitions in this
+                                // reward cycle with this PoX ID, but that were never confirmed
+                                let start_height = last_invalidate_start_block;
+                                let end_height = canonical_burnchain_tip.block_height; // start_height + (self.burnchain.pox_constants.reward_cycle_length as u64); + 1;
+                                for height in start_height..end_height {
+                                    let snapshots_and_pox_ids =
+                                        self.get_snapshots_and_pox_ids_at_height(height)?;
+                                    let num_sns = snapshots_and_pox_ids.len();
+                                    test_debug!("{} snapshots at {}", num_sns, height);
+
+                                    let mut found = false;
+                                    for (sn, sn_pox_id) in snapshots_and_pox_ids.into_iter() {
+                                        test_debug!(
+                                            "Snapshot {} height {} has PoX ID {}",
+                                            &sn.sortition_id,
+                                            sn.block_height,
+                                            &sn_pox_id
+                                        );
+                                        if affirmation_pox_id.has_prefix(&sn_pox_id) {
+                                            // have already processed this sortition
+                                            test_debug!("Already processed sortition {} at height {} with PoX ID {} on canonical affirmation map {}", &sn.sortition_id, sn.block_height, &sn_pox_id, &heaviest_am);
+                                            found = true;
+                                            last_invalidate_start_block = height;
+                                            valid_sortition_ids.push(sn.sortition_id);
+                                            break;
+                                        }
+                                    }
+                                    if !found && num_sns > 0 {
+                                        // there are snapshots, and they're all diverged
+                                        debug!("No snapshot at height {} has a PoX ID that is a prefix of {} (affirmation map {})", height, &affirmation_pox_id, &heaviest_am);
+                                        break;
+                                    }
+                                }
+                                break;
+                            }
+                        }
+
+                        if !found_diverged {
+                            continue;
+                        }
+
+                        // we may have processed some sortitions correctly within this reward
+                        // cycle. Advance forward until we find one that we haven't.
+ info!( + "Re-playing sortitions starting within reward cycle {} burn height {}", + rc, last_invalidate_start_block + ); + + diverged = true; + break; + } + + if diverged { + // find our ancestral sortition ID that's the end of the last reward cycle + // the new affirmation map would have in common with the old affirmation + // map, and invalidate its descendants + let ic = self.sortition_db.index_conn(); + let sortition_id = self.canonical_sortition_tip.as_ref().expect( + "FAIL: processing an affirmation reorg, but don't have a canonical sortition tip", + ); + + // first snapshot in which we'll invalidate all descendant snapshots, but retain some previously-invalidated snapshots + let revalidate_sn = SortitionDB::get_ancestor_snapshot( + &ic, + first_invalidate_start_block - 1, + &sortition_id, + )? + .expect(&format!( + "BUG: no ancestral sortition at height {}", + first_invalidate_start_block - 1 + )); + + // first snapshot at which we'll invalidate all descendant snapshots + let invalidate_sn = SortitionDB::get_ancestor_snapshot( + &ic, + last_invalidate_start_block - 1, + &sortition_id, + )? 
+ .expect(&format!( + "BUG: no ancestral sortition at height {}", + last_invalidate_start_block - 1 + )); + + let invalidation_height = revalidate_sn.block_height; + let mut chainstate_db_tx = self.chain_state_db.db_tx_begin()?; + + debug!("Invalidate all descendants of {} (after height {} sortition {}), revalidate some sortitions at and after height {}, and retry all orphaned Stacks blocks at or after height {}", + &revalidate_sn.burn_header_hash, revalidate_sn.block_height, &revalidate_sn.sortition_id, invalidate_sn.block_height, first_invalidate_start_block); + + self.sortition_db.invalidate_descendants_with_closure( + &revalidate_sn.burn_header_hash, + |sort_tx, burn_header, invalidate_queue| { + // do this once in the transaction, after we've invalidated all other + // sibling blocks to these now-valid sortitions + test_debug!( + "Invalidate all sortitions for {} ({} remaining)", + &burn_header, + invalidate_queue.len() + ); + if invalidate_queue.len() == 0 { + // last time this method will be called + for valid_sn in valid_sortition_ids.iter() { + test_debug!("Revalidate snapshot {}", valid_sn); + SortitionDB::revalidate_snapshot(sort_tx, valid_sn).expect( + &format!( + "FATAL: failed to revalidate sortition {}", + valid_sn + ), + ); + } + } + + // permit re-processing of any associated stacks blocks if they're + // orphaned + forget_orphan_stacks_blocks( + sort_tx, + &mut chainstate_db_tx, + burn_header, + invalidation_height, + ); + }, + )?; + + for burn_height in + first_invalidate_start_block..(last_invalidate_start_block + 1) + { + // retry this orphan + let ic = self.sortition_db.index_conn(); + let handle = ic.as_handle(&sortition_id); + let sn = handle + .get_block_snapshot_by_height(burn_height)? 
+ .expect("BUG: no ancestral snapshot"); + + forget_orphan_stacks_blocks( + &self.sortition_db.conn(), + &mut chainstate_db_tx, + &sn.burn_header_hash, + burn_height.saturating_sub(1), + ); + } + + // re-process the anchor block state for this reward cycle + let pox_id = affirmation_pox_id; + + let highest_valid_sortition_id = valid_sortition_ids + .last() + .unwrap_or(&invalidate_sn.sortition_id) + .to_owned(); + let highest_valid_snapshot = SortitionDB::get_block_snapshot( + &self.sortition_db.conn(), + &highest_valid_sortition_id, + )? + .expect(&format!( + "BUG: no such sortition {}", + &highest_valid_sortition_id + )); + + let (canonical_ch, canonical_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash( + &self.sortition_db.conn(), + )?; + + debug!( + "Highest valid sortition is {} ({} in height {}); Stacks tip is {}/{}", + &highest_valid_snapshot.sortition_id, + &highest_valid_snapshot.burn_header_hash, + highest_valid_snapshot.block_height, + &canonical_ch, + &canonical_bhh + ); + + // by holding this lock as long as we do, we ensure that the sortition DB's + // view of the canonical stacks chain tip can't get changed (since no + // Stacks blocks can be processed). + chainstate_db_tx + .commit() + .map_err(|e| DBError::SqliteError(e))?; + + self.canonical_chain_tip = + Some(StacksBlockId::new(&canonical_ch, &canonical_bhh)); + + self.canonical_sortition_tip = Some(highest_valid_snapshot.sortition_id); + self.canonical_pox_id = Some(pox_id); + self.heaviest_anchor_block_affirmation_map = Some(heaviest_am); + } + } else { + self.heaviest_anchor_block_affirmation_map = Some(heaviest_am); + } + } else { + self.heaviest_anchor_block_affirmation_map = Some(heaviest_am); + } + } else { + self.heaviest_anchor_block_affirmation_map = Some(heaviest_am); + } + + Ok(()) + } + + /// Use the network's affirmations to re-interpret our local PoX anchor block status into what + /// the network affirmed was their PoX anchor block statuses. 
+ /// If we're blocked on receiving a new anchor block that we don't have (i.e. the network + /// affirmed that it exists), then indicate so by returning its hash. + fn reinterpret_affirmed_pox_anchor_block_status( + &mut self, + canonical_affirmation_map: &AffirmationMap, + header: &BurnchainBlockHeader, + rc_info: &mut RewardCycleInfo, + ) -> Result, Error> { + // re-calculate the reward cycle info's anchor block status, based on what + // the network has affirmed in each prepare phase. + + // is this anchor block affirmed? Only process it if so! + let new_reward_cycle = self + .burnchain + .block_height_to_reward_cycle(header.block_height) + .expect("BUG: processed block before start of epoch 2.1"); + + test_debug!( + "Verify affirmation against PoX info in reward cycle {} canonical affirmation map {}", + new_reward_cycle, + &canonical_affirmation_map + ); + + let new_status = if new_reward_cycle > 0 + && new_reward_cycle <= (canonical_affirmation_map.len() as u64) + { + // we're processing an anchor block from an earlier reward cycle, + // meaning that we're in the middle of an affirmation reorg. + let affirmation = canonical_affirmation_map + .at(new_reward_cycle - 1) + .expect("BUG: checked index overflow"); + test_debug!("Affirmation '{}' for anchor block of previous reward cycle {} canonical affirmation map {}", &affirmation, new_reward_cycle - 1, &canonical_affirmation_map); + + // switch reward cycle info assessment based on what the network + // affirmed. + match &rc_info.anchor_status { + PoxAnchorBlockStatus::SelectedAndKnown(block_hash, reward_set) => { + match affirmation { + AffirmationMapEntry::PoxAnchorBlockPresent => { + // matches affirmation + PoxAnchorBlockStatus::SelectedAndKnown( + block_hash.clone(), + reward_set.clone(), + ) + } + AffirmationMapEntry::PoxAnchorBlockAbsent => { + // network actually affirms that this anchor block + // is absent. 
+ warn!("Chose PoX anchor block for reward cycle {}, but it is affirmed absent by the network", new_reward_cycle - 1; "affirmation map" => %&canonical_affirmation_map); + PoxAnchorBlockStatus::SelectedAndUnknown(block_hash.clone()) + } + AffirmationMapEntry::Nothing => { + // no anchor block selected either way + PoxAnchorBlockStatus::NotSelected + } + } + } + PoxAnchorBlockStatus::SelectedAndUnknown(ref block_hash) => { + match affirmation { + AffirmationMapEntry::PoxAnchorBlockPresent => { + // the network affirms that this anchor block + // exists, but we don't have it locally. Stop + // processing here and wait for it to arrive, via + // the downloader. + info!("Anchor block {} for reward cycle {} is affirmed by the network ({}), but must be downloaded", block_hash, canonical_affirmation_map, new_reward_cycle - 1); + return Ok(Some(block_hash.clone())); + } + AffirmationMapEntry::PoxAnchorBlockAbsent => { + // matches affirmation + PoxAnchorBlockStatus::SelectedAndUnknown(block_hash.clone()) + } + AffirmationMapEntry::Nothing => { + // no anchor block selected either way + PoxAnchorBlockStatus::NotSelected + } + } + } + PoxAnchorBlockStatus::NotSelected => { + // no anchor block selected either way + PoxAnchorBlockStatus::NotSelected + } + } + } else { + // no-op: our view of the set of anchor blocks is consistent with + // the canonical affirmation map, so the status of this new anchor + // block is whatever it was calculated to be. + rc_info.anchor_status.clone() + }; + + // update new status + debug!( + "Update anchor block status for reawrd cycle {} from {:?} to {:?}", + new_reward_cycle, &rc_info.anchor_status, &new_status + ); + rc_info.anchor_status = new_status; + Ok(None) + } + + /// For unaffirmed anchor blocks, determine if they should be marked as present or absent. 
+ fn has_unaffirmed_pox_anchor_block( + &self, + block_commit: LeaderBlockCommitOp, + _block_commit_metadata: BlockCommitMetadata, + ) -> bool { + let tip = SortitionDB::get_canonical_burn_chain_tip(self.sortition_db.conn()) + .expect("BUG: failed to query chain tip from sortition DB"); + let ic = self.sortition_db.index_conn(); + if let Some(sn) = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block_commit.block_header_hash, + ) + .expect("BUG: failed to query sortition DB") + { + // it exists on this sortition history, but do we have it in the chainstate? + let present = StacksChainState::has_stacks_block( + &self.chain_state_db.db(), + &StacksBlockHeader::make_index_block_hash( + &sn.consensus_hash, + &block_commit.block_header_hash, + ), + ) + .expect("BUG: failed to query chainstate DB"); + if present { + test_debug!( + "Have processed unaffirmed PoX anchor block {}/{} (burn height {})", + &sn.consensus_hash, + &block_commit.block_header_hash, + sn.block_height + ); + present + } else { + // have we instead maybe downloaded it but not processed it yet? 
+ // NOTE: if the anchor block is unprocessable, it will eventually get orphaned + test_debug!( + "Have NOT processed unaffirmed PoX anchor block {}/{} (burn height {})", + &sn.consensus_hash, + &block_commit.block_header_hash, + sn.block_height + ); + let has_staging = StacksChainState::has_staging_block( + &self.chain_state_db.db(), + &sn.consensus_hash, + &block_commit.block_header_hash, + ) + .expect("BUG: failed to query chainstate DB"); + if has_staging { + test_debug!( + "Have unprocessed staging PoX anchor block {}/{} (burn height {})", + &sn.consensus_hash, + &block_commit.block_header_hash, + sn.block_height + ); + true + } else { + test_debug!( + "Do NOT have unprocessed staging PoX anchor block {}/{} (burn height {})", + &sn.consensus_hash, + &block_commit.block_header_hash, + sn.block_height + ); + false + } + } + } else { + test_debug!( + "No block snapshot for PoX anchor block {} off of sortition {}", + &block_commit.block_header_hash, + &tip.sortition_id + ); + return false; } } - pub fn handle_new_burnchain_block(&mut self) -> Result<(), Error> { + pub fn get_canonical_affirmation_map(&self) -> Result { + BurnchainDB::get_canonical_affirmation_map( + self.burnchain_blocks_db.conn(), + &self.burnchain, + |anchor_block_commit, anchor_block_metadata| { + self.has_unaffirmed_pox_anchor_block(anchor_block_commit, anchor_block_metadata) + }, + ) + .map_err(|e| e.into()) + } + + /// Handle a new burnchain block, optionally rolling back the canonical PoX sortition history + /// and setting it up to be replayed in the event the network affirms a different history. If + /// this happens, *and* if re-processing the new affirmed history is *blocked on* the + /// unavailability of a PoX anchor block that *must now* exist, then return the hash of this + /// anchor block. + pub fn handle_new_burnchain_block(&mut self) -> Result, Error> { + // first, see if the canonical affirmation map has changed. 
If so, this will wind back the + // canonical sortition and stacks chain tips. + self.handle_affirmation_reorg()?; + // Retrieve canonical burnchain chain tip from the BurnchainBlocksDB let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; + let canonical_affirmation_map = self.get_canonical_affirmation_map()?; + debug!("Handle new canonical burnchain tip"; "height" => %canonical_burnchain_tip.block_height, "block_hash" => %canonical_burnchain_tip.block_hash.to_string()); @@ -522,19 +1099,23 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> // We halt the ancestry research as soon as we find a processed parent let mut last_processed_ancestor = loop { if let Some(found_sortition) = self.sortition_db.is_sortition_processed(&cursor)? { + test_debug!( + "Ancestor sortition {} of block {} is processed", + &found_sortition, + &cursor + ); break found_sortition; } - let current_block = self - .burnchain_blocks_db - .get_burnchain_block(&cursor) - .map_err(|e| { - warn!( - "ChainsCoordinator: could not retrieve block burnhash={}", - &cursor - ); - Error::NonContiguousBurnchainBlock(e) - })?; + let current_block = + BurnchainDB::get_burnchain_block(&self.burnchain_blocks_db.conn(), &cursor) + .map_err(|e| { + warn!( + "ChainsCoordinator: could not retrieve block burnhash={}", + &cursor + ); + Error::NonContiguousBurnchainBlock(e) + })?; let parent = current_block.header.parent_block_hash.clone(); sortitions_to_process.push_front(current_block); @@ -551,9 +1132,22 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> burn_header_hashes.join(", ") ); + let mut replay_blocks = vec![]; + for unprocessed_block in sortitions_to_process.into_iter() { let BurnchainBlockData { header, ops } = unprocessed_block; + let _reward_cycle = self + .burnchain + .block_height_to_reward_cycle(header.block_height) + .unwrap_or(u64::MAX); + test_debug!( + "Process burn block {} reward cycle {} in {}", + 
header.block_height, + _reward_cycle, + &self.burnchain.working_dir + ); + // calculate paid rewards during this burnchain block if we announce // to an events dispatcher let paid_rewards = if self.dispatcher.is_some() { @@ -567,7 +1161,47 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> // at this point, we need to figure out if the sortition we are // about to process is the first block in reward cycle. - let reward_cycle_info = self.get_reward_cycle_info(&header)?; + let mut reward_cycle_info = self.get_reward_cycle_info(&header)?; + + if let Some(rc_info) = reward_cycle_info.as_mut() { + let cur_epoch = + SortitionDB::get_stacks_epoch(self.sortition_db.conn(), header.block_height)? + .expect(&format!( + "BUG: no epoch defined at height {}", + header.block_height + )); + + match cur_epoch.epoch_id { + StacksEpochId::Epoch21 => { + // potentially have an anchor block, but only process the next reward cycle (and + // subsequent reward cycles) with it if the prepare-phase block-commits affirm its + // presence. This only gets checked in Stacks 2.1 or later. + + // NOTE: this mutates rc_info + if let Some(missing_anchor_block) = self + .reinterpret_affirmed_pox_anchor_block_status( + &canonical_affirmation_map, + &header, + rc_info, + )? + { + // missing this anchor block -- cannot proceed + info!("Burnchain block processing stops due to missing affirmed anchor block {}", &missing_anchor_block); + return Ok(Some(missing_anchor_block)); + } + } + _ => { + // no-op -- pre 2.1 + } + }; + + test_debug!( + "Reward cycle info at height {}: {:?}", + &header.block_height, + &rc_info + ); + } + let (next_snapshot, _, reward_set_info) = self .sortition_db .evaluate_sortition( @@ -597,18 +1231,59 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> "burn_height" => next_snapshot.block_height ); + // we may already have the associated Stacks block, but linked to a different sortition + // history. 
For example, if an anchor block was selected but PoX was voted disabled or + // not voted to activate, then the same Stacks blocks could be chosen but with + // different consensus hashes. So, check here if we happen to already have the block + // stored, and proceed to put it into staging again. + if next_snapshot.sortition { + let staging_block_chs = StacksChainState::get_staging_block_consensus_hashes( + self.chain_state_db.db(), + &next_snapshot.winning_stacks_block_hash, + )?; + + let mut found = false; + for ch in staging_block_chs.iter() { + if *ch == next_snapshot.consensus_hash { + found = true; + break; + } + } + + if !found && staging_block_chs.len() > 0 { + // we have seen this block before, but in a different consensus fork. + // queue it for re-processing -- it might still be valid if it's in a reward + // cycle that exists on the new PoX fork. + debug!("Sortition re-processes Stacks block {}, which is present on a different PoX fork", &next_snapshot.winning_stacks_block_hash); + + self.replay_stacks_blocks(vec![next_snapshot + .winning_stacks_block_hash + .clone()])?; + replay_blocks.push(next_snapshot.winning_stacks_block_hash); + } + } + // always bump canonical sortition tip: // if this code path is invoked, the canonical burnchain tip // has moved, so we should move our canonical sortition tip as well. self.canonical_sortition_tip = Some(sortition_id.clone()); last_processed_ancestor = sortition_id; + // self.replay_stacks_blocks(replay_blocks)?; + replay_blocks = vec![]; + if let Some(pox_anchor) = self.process_ready_blocks()? { - return self.process_new_pox_anchor(pox_anchor); + if let Some(expected_anchor_block_hash) = self.process_new_pox_anchor(pox_anchor)? 
{ + info!( + "Burnchain block processing stops due to missing affirmed anchor block {}", + &expected_anchor_block_hash + ); + return Ok(Some(expected_anchor_block_hash)); + } } } - Ok(()) + Ok(None) } /// returns None if this burnchain block is _not_ the start of a reward cycle @@ -636,6 +1311,123 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> ) } + /// Process any Atlas attachment events and forward them to the Atlas subsystem + fn process_atlas_attachment_events(&self, block_receipt: &StacksEpochReceipt) { + let mut attachments_instances = HashSet::new(); + for receipt in block_receipt.tx_receipts.iter() { + if let TransactionOrigin::Stacks(ref transaction) = receipt.transaction { + if let TransactionPayload::ContractCall(ref contract_call) = transaction.payload { + let contract_id = contract_call.to_clarity_contract_id(); + increment_contract_calls_processed(); + if self.atlas_config.contracts.contains(&contract_id) { + for event in receipt.events.iter() { + if let StacksTransactionEvent::SmartContractEvent(ref event_data) = + event + { + let res = AttachmentInstance::try_new_from_value( + &event_data.value, + &contract_id, + block_receipt.header.index_block_hash(), + block_receipt.header.block_height, + receipt.transaction.txid(), + ); + if let Some(attachment_instance) = res { + attachments_instances.insert(attachment_instance); + } + } + } + } + } + } + } + if !attachments_instances.is_empty() { + info!( + "Atlas: {} attachment instances emitted from events", + attachments_instances.len() + ); + match self.attachments_tx.send(attachments_instances) { + Ok(_) => {} + Err(e) => { + error!("Atlas: error dispatching attachments {}", e); + } + }; + } + } + + /// Replay any existing Stacks blocks we have that arose on a different PoX fork. + /// This is best-effort -- if a block isn't found or can't be loaded, it's skipped. 
+ pub fn replay_stacks_blocks(&mut self, blocks: Vec) -> Result<(), Error> { + let tip = SortitionDB::get_canonical_burn_chain_tip(self.sortition_db.conn())?; + for bhh in blocks.into_iter() { + let staging_block_chs = StacksChainState::get_staging_block_consensus_hashes( + self.chain_state_db.db(), + &bhh, + )?; + let mut processed = false; + + debug!("Consider replaying {} from {:?}", &bhh, &staging_block_chs); + + for alt_ch in staging_block_chs.into_iter() { + let alt_id = StacksBlockHeader::make_index_block_hash(&alt_ch, &bhh); + if !StacksChainState::has_block_indexed(&self.chain_state_db.blocks_path, &alt_id) + .unwrap_or(false) + { + continue; + } + + // does this consensus hash exist somewhere? Doesn't have to be on the canonical + // PoX fork. + let ch_height_opt = self.sortition_db.get_consensus_hash_height(&alt_ch)?; + let ch_height = if let Some(ch_height) = ch_height_opt { + ch_height + } else { + continue; + }; + + // Find the corresponding snapshot on the canonical PoX fork. + let ancestor_sn = if let Some(sn) = SortitionDB::get_ancestor_snapshot( + &self.sortition_db.index_conn(), + ch_height, + &tip.sortition_id, + )? 
{ + sn + } else { + continue; + }; + + // the new consensus hash + let ch = ancestor_sn.consensus_hash; + + if let Ok(Some(block)) = + StacksChainState::load_block(&self.chain_state_db.blocks_path, &alt_ch, &bhh) + { + let ic = self.sortition_db.index_conn(); + if let Some(parent_snapshot) = ic + .find_parent_snapshot_for_stacks_block(&ch, &bhh) + .unwrap_or(None) + { + // replay in this consensus hash history + debug!("Replay Stacks block from {} to {}/{}", &alt_ch, &ch, &bhh); + let _ = self.chain_state_db.preprocess_anchored_block( + &self.sortition_db.index_conn(), + &ch, + &block, + &parent_snapshot.consensus_hash, + get_epoch_time_secs(), + ); + processed = true; + break; + } + } + } + + if !processed { + test_debug!("Did NOT replay {}", &bhh); + } + } + Ok(()) + } + /// /// Process any ready staging blocks until there are either: /// * there are no more to process @@ -680,62 +1472,19 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> self.notifier.notify_stacks_block_processed(); increment_stx_blocks_processed_counter(); - let block_hash = block_receipt.header.anchored_header.block_hash(); + self.process_atlas_attachment_events(&block_receipt); - let mut attachments_instances = HashSet::new(); - for receipt in block_receipt.tx_receipts.iter() { - if let TransactionOrigin::Stacks(ref transaction) = receipt.transaction { - if let TransactionPayload::ContractCall(ref contract_call) = - transaction.payload - { - let contract_id = contract_call.to_clarity_contract_id(); - increment_contract_calls_processed(); - if self.atlas_config.contracts.contains(&contract_id) { - for event in receipt.events.iter() { - if let StacksTransactionEvent::SmartContractEvent( - ref event_data, - ) = event - { - let res = AttachmentInstance::try_new_from_value( - &event_data.value, - &contract_id, - block_receipt.header.index_block_hash(), - block_receipt.header.block_height, - receipt.transaction.txid(), - ); - if let Some(attachment_instance) = res { 
- attachments_instances.insert(attachment_instance); - } - } - } - } - } - } - } - if !attachments_instances.is_empty() { - info!( - "Atlas: {} attachment instances emitted from events", - attachments_instances.len() - ); - match self.attachments_tx.send(attachments_instances) { - Ok(_) => {} - Err(e) => { - error!("Atlas: error dispatching attachments {}", e); - } - }; - } + let block_hash = block_receipt.header.anchored_header.block_hash(); + let winner_snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &self.sortition_db.index_conn(), + canonical_sortition_tip, + &block_hash, + ) + .expect("FAIL: could not find block snapshot for winning block hash") + .expect("FAIL: could not find block snapshot for winning block hash"); if let Some(dispatcher) = self.dispatcher { let metadata = &block_receipt.header; - let winner_txid = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &self.sortition_db.index_conn(), - canonical_sortition_tip, - &block_hash, - ) - .expect("FAIL: could not find block snapshot for winning block hash") - .expect("FAIL: could not find block snapshot for winning block hash") - .winning_block_txid; - let block: StacksBlock = { let block_path = StacksChainState::get_block_path( &self.chain_state_db.blocks_path, @@ -757,21 +1506,81 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> block_receipt.header, block_receipt.tx_receipts, &parent, - winner_txid, + winner_snapshot.winning_block_txid.clone(), block_receipt.matured_rewards, block_receipt.matured_rewards_info, ); } - // if, just after processing the block, we _know_ that this block is a pox anchor, that means - // that sortitions have already begun processing that didn't know about this pox anchor. - // we need to trigger an unwind + // Was this block sufficiently confirmed by the prepare phase that it was a PoX + // anchor block? 
And if we're in epoch 2.1, does it match the heaviest-confirmed + // block-commit in the burnchain DB, and is it affirmed by the majority of the + // network? if let Some(pox_anchor) = self .sortition_db .is_stacks_block_pox_anchor(&block_hash, canonical_sortition_tip)? { - info!("Discovered an old anchor block: {}", &pox_anchor); - return Ok(Some(pox_anchor)); + // what epoch is this block in? + let cur_epoch = SortitionDB::get_stacks_epoch( + self.sortition_db.conn(), + winner_snapshot.block_height, + )? + .expect(&format!( + "BUG: no epoch defined at height {}", + winner_snapshot.block_height + )); + + match cur_epoch.epoch_id { + StacksEpochId::Epoch10 => { + panic!("BUG: Snapshot predates Stacks 2.0"); + } + StacksEpochId::Epoch20 => { + // 2.0 behavior: only consult the sortition DB + info!("Discovered an old anchor block: {}", &pox_anchor); + return Ok(Some(pox_anchor)); + } + StacksEpochId::Epoch21 => { + // 2.1 behavior: the anchor block must also be the + // heaviest-confirmed anchor block by BTC weight, and the highest + // such anchor block if there are multiple contenders. + if BurnchainDB::is_anchor_block( + self.burnchain_blocks_db.conn(), + &winner_snapshot.burn_header_hash, + &winner_snapshot.winning_block_txid, + )? { + // affirmed? + let canonical_am = self.get_canonical_affirmation_map()?; + + let commit = BurnchainDB::get_block_commit( + self.burnchain_blocks_db.conn(), + &winner_snapshot.winning_block_txid, + )? 
+ .expect("BUG: no commit metadata in DB for existing commit"); + + let reward_cycle = self.burnchain.block_height_to_reward_cycle(commit.block_height) + .expect("BUG: accepted block commit has a block height before the first reward cycle"); + + if canonical_am + .at(reward_cycle) + .unwrap_or(AffirmationMapEntry::Nothing) + == AffirmationMapEntry::PoxAnchorBlockPresent + { + // yup, we're expecting this + info!("Discovered an old anchor block: {}", &pox_anchor); + return Ok(Some(pox_anchor)); + } else { + // nope -- can ignore + debug!( + "Discovered unaffirmed old anchor block: {}", + &pox_anchor + ); + return Ok(None); + } + } else { + debug!("Stacks block {} received F*w confirmations but is not the heaviest-confirmed burnchain block, so treating as non-anchor block", &pox_anchor); + } + } + } } } } @@ -784,7 +1593,14 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> Ok(None) } - fn process_new_pox_anchor(&mut self, block_id: BlockHeaderHash) -> Result<(), Error> { + /// Process a new PoX anchor block, possibly resulting in the PoX history being unwound and + /// replayed through a different sequence of consensus hashes. If the new anchor block causes + /// the node to reach a prepare-phase that elects a network-affirmed anchor block that we don't + /// have, then return its block hash so the caller can go download and process it. + fn process_new_pox_anchor( + &mut self, + block_id: BlockHeaderHash, + ) -> Result, Error> { // get the last sortition in the prepare phase that chose this anchor block // that sortition is now the current canonical sortition, // and now that we have process the anchor block for the corresponding reward phase, From ae24b88c0751b9773df7a966ec65c07390c60513 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:47:36 -0400 Subject: [PATCH 013/116] feat: add tests to cover the emergence of multiple anchor block history forks. 
Make it so the tests confirm that two anchor block history forks can "take turns" being the canonical fork, ensuring that the chains coordinator correctly reprocesses and even revalidates previously-invalid sortitions and their Stacks blocks. --- src/chainstate/coordinator/tests.rs | 1473 +++++++++++++++++++++++---- 1 file changed, 1291 insertions(+), 182 deletions(-) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 70a0e0f116..73c6c4dc6f 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use std::cmp; +use std::collections::BTreeMap; use std::collections::HashSet; use std::collections::VecDeque; use std::sync::{ @@ -26,6 +27,11 @@ use std::sync::{ use rusqlite::Connection; use address; +use burnchains::affirmation::*; +use burnchains::bitcoin::address::BitcoinAddress; +use burnchains::bitcoin::indexer::BitcoinIndexer; +use burnchains::bitcoin::BitcoinNetworkType; +use burnchains::db::tests::*; use burnchains::{db::*, *}; use chainstate; use chainstate::burn::db::sortdb::SortitionDB; @@ -42,7 +48,7 @@ use clarity_vm::clarity::ClarityConnection; use core; use core::*; use monitoring::increment_stx_blocks_processed_counter; -use util::hash::Hash160; +use util::hash::{hex_bytes, Hash160}; use util::vrf::*; use vm::{ costs::{ExecutionCost, LimitedCostTracker}, @@ -58,10 +64,23 @@ use crate::types::chainstate::{ use crate::types::proof::TrieHash; use crate::{types, util}; +use deps::bitcoin::blockdata::block::{BlockHeader, LoneBlockHeader}; +use deps::bitcoin::network::serialize::BitcoinHash; +use deps::bitcoin::util::hash::Sha256dHash; + lazy_static! 
{ - static ref BURN_BLOCK_HEADERS: Arc = Arc::new(AtomicU64::new(1)); - static ref TXIDS: Arc = Arc::new(AtomicU64::new(1)); - static ref MBLOCK_PUBKHS: Arc = Arc::new(AtomicU64::new(1)); + pub static ref BURN_BLOCK_HEADERS: Arc = Arc::new(AtomicU64::new(1)); + pub static ref TXIDS: Arc = Arc::new(AtomicU64::new(1)); + pub static ref MBLOCK_PUBKHS: Arc = Arc::new(AtomicU64::new(1)); + pub static ref STACKS_BLOCK_HEADERS: Arc = Arc::new(AtomicU64::new(1)); +} + +pub fn next_block_hash() -> BlockHeaderHash { + let cur = STACKS_BLOCK_HEADERS.fetch_add(1, Ordering::SeqCst); + let mut bytes = vec![]; + bytes.extend_from_slice(&cur.to_le_bytes()); + bytes.extend_from_slice(&[0; 24]); + BlockHeaderHash::from_bytes(&bytes).unwrap() } pub fn next_burn_header_hash() -> BurnchainHeaderHash { @@ -90,6 +109,7 @@ pub fn next_hash160() -> Hash160 { /// Produce a burn block, insert it into burnchain_db, and insert it into others as well pub fn produce_burn_block<'a, I: Iterator>( + burnchain_conf: &Burnchain, burnchain_db: &mut BurnchainDB, par: &BurnchainHeaderHash, mut ops: Vec, @@ -97,14 +117,14 @@ pub fn produce_burn_block<'a, I: Iterator>( ) -> BurnchainHeaderHash { let BurnchainBlockData { header: par_header, .. 
- } = burnchain_db.get_burnchain_block(par).unwrap(); + } = BurnchainDB::get_burnchain_block(&burnchain_db.conn(), par).unwrap(); assert_eq!(&par_header.block_hash, par); let block_height = par_header.block_height + 1; for op in ops.iter_mut() { op.set_block_height(block_height); } - produce_burn_block_do_not_set_height(burnchain_db, par, ops, others) + produce_burn_block_do_not_set_height(burnchain_conf, burnchain_db, par, ops, others) } fn get_burn_distribution(conn: &Connection, sortition: &SortitionId) -> Vec { @@ -120,6 +140,7 @@ fn get_burn_distribution(conn: &Connection, sortition: &SortitionId) -> Vec>( + burnchain_conf: &Burnchain, burnchain_db: &mut BurnchainDB, par: &BurnchainHeaderHash, mut ops: Vec, @@ -127,12 +148,23 @@ fn produce_burn_block_do_not_set_height<'a, I: Iterator BurnchainHeaderHash { let BurnchainBlockData { header: par_header, .. - } = burnchain_db.get_burnchain_block(par).unwrap(); + } = BurnchainDB::get_burnchain_block(&burnchain_db.conn(), par).unwrap(); assert_eq!(&par_header.block_hash, par); let block_height = par_header.block_height + 1; let timestamp = par_header.timestamp + 1; let num_txs = ops.len() as u64; - let block_hash = next_burn_header_hash(); + + let bitcoin_header = BlockHeader { + bits: 0, + merkle_root: Sha256dHash([0u8; 32]), + nonce: 0, + prev_blockhash: par.to_bitcoin_hash(), + time: timestamp as u32, + version: 0x20000000, + }; + + let block_hash = BurnchainHeaderHash::from_bitcoin_hash(&bitcoin_header.bitcoin_hash()); + let header = BurnchainBlockHeader { block_height, timestamp, @@ -141,18 +173,47 @@ fn produce_burn_block_do_not_set_height<'a, I: Iterator, initial_balances: Option>, +) { + inner_setup_states( + paths, + vrf_keys, + committers, + pox_consts, + initial_balances, + StacksEpochId::Epoch20, + ) +} + +pub fn setup_states_2_1( + paths: &[&str], + vrf_keys: &[VRFPrivateKey], + committers: &[StacksPrivateKey], + pox_consts: Option, + initial_balances: Option>, +) { + inner_setup_states( + paths, + 
vrf_keys, + committers, + pox_consts, + initial_balances, + StacksEpochId::Epoch21, + ) +} + +fn inner_setup_states( + paths: &[&str], + vrf_keys: &[VRFPrivateKey], + committers: &[StacksPrivateKey], + pox_consts: Option, + initial_balances: Option>, + start_epoch: StacksEpochId, ) { let mut burn_block = None; let mut others = vec![]; @@ -182,24 +278,24 @@ pub fn setup_states( for path in paths.iter() { let burnchain = get_burnchain(path, pox_consts.clone()); + let epochs = match start_epoch { + StacksEpochId::Epoch20 => StacksEpoch::unit_test(burnchain.first_block_height), + StacksEpochId::Epoch21 => StacksEpoch::unit_test_2_1(burnchain.first_block_height), + _ => panic!("Cannot start in epoch 1.0"), + }; + let sortition_db = SortitionDB::connect( &burnchain.get_db_path(), burnchain.first_block_height, &burnchain.first_block_hash, burnchain.first_block_timestamp.into(), - &StacksEpoch::unit_test(burnchain.first_block_height), + &epochs, true, ) .unwrap(); - let burnchain_blocks_db = BurnchainDB::connect( - &burnchain.get_burnchaindb_path(), - burnchain.first_block_height, - &burnchain.first_block_hash, - burnchain.first_block_timestamp as u64, - true, - ) - .unwrap(); + let burnchain_blocks_db = + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), &burnchain, true).unwrap(); if burn_block.is_none() { let first_sortition = @@ -237,15 +333,18 @@ pub fn setup_states( burnchain_blocks_db, first_sortition.burn_header_hash, registers, + path.clone(), )); } else { others.push(burnchain_blocks_db); } } - let (mut burnchain_blocks_db, burn_header_hash, registers) = burn_block.take().unwrap(); + let (mut burnchain_blocks_db, burn_header_hash, registers, path) = burn_block.take().unwrap(); + let burnchain = get_burnchain(path, pox_consts.clone()); produce_burn_block( + &burnchain, &mut burnchain_blocks_db, &burn_header_hash, registers, @@ -534,6 +633,36 @@ fn make_stacks_block( ) } +fn make_stacks_block_from_parent_sortition( + sort_db: &SortitionDB, + state: &mut 
StacksChainState, + burnchain: &Burnchain, + parent_block: &BlockHeaderHash, + parent_height: u64, + miner: &StacksPrivateKey, + my_burn: u64, + vrf_key: &VRFPrivateKey, + key_index: u32, + parent_sortition: BlockSnapshot, +) -> (BlockstackOperationType, StacksBlock) { + make_stacks_block_with_input( + sort_db, + state, + burnchain, + parent_block, + parent_height, + miner, + my_burn, + vrf_key, + key_index, + None, + 0, + false, + (Txid([0; 32]), 0), + Some(parent_sortition), + ) +} + /// build a stacks block with just the coinbase off of /// parent_block, in the canonical sortition fork of SortitionDB. /// parent_block _must_ be included in the StacksChainState @@ -596,6 +725,7 @@ fn make_stacks_block_with_recipients_and_sunset_burn( sunset_burn, post_sunset_burn, (Txid([0; 32]), 0), + None, ) } @@ -616,6 +746,7 @@ fn make_stacks_block_with_input( sunset_burn: u64, post_sunset_burn: bool, input: (Txid, u32), + parents_sortition_opt: Option, ) -> (BlockstackOperationType, StacksBlock) { let tx_auth = TransactionAuth::from_p2pkh(miner).unwrap(); @@ -632,23 +763,28 @@ fn make_stacks_block_with_input( let coinbase_op = tx_signer.get_tx().unwrap(); let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let parents_sortition = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &sort_db.index_conn(), - &sortition_tip.sortition_id, - parent_block, - ) - .unwrap() - .unwrap(); + let parents_sortition = if let Some(sn) = parents_sortition_opt { + sn + } else { + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &sort_db.index_conn(), + &sortition_tip.sortition_id, + parent_block, + ) + .unwrap() + .unwrap() + }; + + eprintln!( + "Find parents stacks header: {} in sortition {} (height {}, parent {}/{},{}, index block hash {})", + &parent_block, &parents_sortition.sortition_id, parents_sortition.block_height, &parents_sortition.consensus_hash, parent_block, parent_height, 
&StacksBlockHeader::make_index_block_hash(&parents_sortition.consensus_hash, &parent_block) + ); let parent_vtxindex = SortitionDB::get_block_winning_vtxindex(sort_db.conn(), &parents_sortition.sortition_id) .unwrap() .unwrap(); - eprintln!( - "Find parents stacks header: {} in sortition {}", - &parent_block, &parents_sortition.sortition_id - ); let parent_stacks_header = StacksChainState::get_anchored_block_header_info( state.db(), &parents_sortition.consensus_hash, @@ -656,6 +792,9 @@ fn make_stacks_block_with_input( ) .unwrap() .unwrap(); + + eprintln!("Build off of {:?}", &parent_stacks_header); + let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()); let total_burn = parents_sortition.total_burn; @@ -748,7 +887,7 @@ fn missed_block_commits() { Some(initial_balances), ); - let mut coord = make_coordinator(path, Some(burnchain_conf)); + let mut coord = make_coordinator(path, Some(burnchain_conf.clone())); coord.handle_new_burnchain_block().unwrap(); @@ -816,6 +955,7 @@ fn missed_block_commits() { 0, false, last_input.as_ref().unwrap().clone(), + None, ); // NOTE: intended for block block_height - 2 last_input = Some(( @@ -868,6 +1008,7 @@ fn missed_block_commits() { 0, false, last_input.as_ref().unwrap().clone(), + None, ) }; @@ -881,6 +1022,7 @@ fn missed_block_commits() { if ix % (MINING_COMMITMENT_WINDOW as usize) == 3 { // produce an empty block! 
produce_burn_block( + &burnchain_conf, &mut burnchain, &burnchain_tip.block_hash, vec![], @@ -897,6 +1039,7 @@ fn missed_block_commits() { }, )); produce_burn_block_do_not_set_height( + &burnchain_conf, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -1093,6 +1236,7 @@ fn test_simple_setup() { }; produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, vec![op], @@ -1409,6 +1553,7 @@ fn test_sortition_with_reward_set() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -1642,6 +1787,7 @@ fn test_sortition_with_burner_reward_set() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -1894,6 +2040,7 @@ fn test_pox_btc_ops() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -2191,6 +2338,7 @@ fn test_stx_transfer_btc_ops() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -2287,7 +2435,7 @@ fn test_initial_coinbase_reward_distributions() { Some(initial_balances), ); - let mut coord = make_coordinator(path, Some(burnchain_conf)); + let mut coord = make_coordinator(path, Some(burnchain_conf.clone())); coord.handle_new_burnchain_block().unwrap(); @@ -2313,6 +2461,7 @@ fn test_initial_coinbase_reward_distributions() { let mut burnchain = get_burnchain_db(path, pox_consts.clone()); let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &burnchain_conf, &mut burnchain, &burnchain_tip.block_hash, vec![], @@ -2352,6 +2501,7 @@ fn test_initial_coinbase_reward_distributions() { eprintln!("BURNCHAIN TIP HEIGHT = {}", burnchain_tip.block_height); if ix % 2 == 1 { produce_burn_block( + &burnchain_conf, &mut burnchain, &burnchain_tip.block_hash, vec![], @@ -2401,6 
+2551,7 @@ fn test_initial_coinbase_reward_distributions() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -2681,6 +2832,7 @@ fn test_sortition_with_sunset() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -2752,10 +2904,6 @@ fn test_sortition_with_sunset() { } #[test] -// This test should panic until the MARF stability issue -// https://github.com/blockstack/stacks-blockchain/issues/1805 -// is resolved: -#[should_panic] /// Test a block that is processable in 2 PoX forks: /// block "11" should be processable in both `111` and `110` /// (because its parent is block `0`, and nobody stacks in @@ -2768,18 +2916,36 @@ fn test_pox_processable_block_in_different_pox_forks() { let _r = std::fs::remove_dir_all(path); let _r = std::fs::remove_dir_all(path_blinded); - let vrf_keys: Vec<_> = (0..12).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..12).map(|_| StacksPrivateKey::new()).collect(); - - setup_states(&[path, path_blinded], &vrf_keys, &committers, None, None); + let pox_consts = Some(PoxConstants::new( + 5, + 2, + 2, + 25, + 5, + u64::max_value(), + u64::max_value(), + )); + let b = get_burnchain(path, pox_consts.clone()); + let b_blind = get_burnchain(path_blinded, pox_consts.clone()); + + let vrf_keys: Vec<_> = (0..20).map(|_| VRFPrivateKey::new()).collect(); + let committers: Vec<_> = (0..20).map(|_| StacksPrivateKey::new()).collect(); + + setup_states_2_1( + &[path, path_blinded], + &vrf_keys, + &committers, + pox_consts.clone(), + None, + ); - let mut coord = make_coordinator(path, None); - let mut coord_blind = make_coordinator(path_blinded, None); + let mut coord = make_coordinator(path, Some(b)); + let mut coord_blind = make_coordinator(path_blinded, Some(b_blind)); coord.handle_new_burnchain_block().unwrap(); 
coord_blind.handle_new_burnchain_block().unwrap(); - let sort_db = get_sortition_db(path, None); + let sort_db = get_sortition_db(path, pox_consts.clone()); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); assert_eq!(tip.block_height, 1); @@ -2789,7 +2955,7 @@ fn test_pox_processable_block_in_different_pox_forks() { .unwrap() .unwrap(); - let sort_db_blind = get_sortition_db(path_blinded, None); + let sort_db_blind = get_sortition_db(path_blinded, pox_consts.clone()); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); assert_eq!(tip.block_height, 1); @@ -2803,24 +2969,25 @@ fn test_pox_processable_block_in_different_pox_forks() { assert_eq!(ops.accepted_ops.len(), vrf_keys.len()); assert_eq!(ops.consumed_leader_keys.len(), 0); - // at first, sortition_ids shouldn't have diverged - // but once the first reward cycle begins, they should diverge. - let mut sortition_ids_diverged = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // setup: - // 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - // \_ 10 _ 11 - // blocks `10` and `11` can be processed either - // in PoX fork 111 or in 110 + // sort:1 6 11 16 21 + // |----- rc 0 --------|------ rc 1 -------|----- rc 2 ------------|-------- rc 3 ----------|----- rc 4 + // ix: X - 0 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 + // \_____________________________________ 10 _ 11 _ 12 _ 13 _ 14 _ 15 _ 16 _ 17 _ 18 _ 19 + // + // for (ix, (vrf_key, miner)) in vrf_keys.iter().zip(committers.iter()).enumerate() { - let mut burnchain = get_burnchain_db(path, None); + let mut burnchain = get_burnchain_db(path, pox_consts.clone()); + let burnchain_blind = get_burnchain_db(path_blinded, pox_consts.clone()); let mut chainstate = get_chainstate(path); + let mut chainstate_blind = get_chainstate(path_blinded); let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); - let burnchain_blinded = get_burnchain_db(path_blinded, None); - let b = get_burnchain(path, None); + let burnchain_tip_blind = burnchain_blind.get_canonical_chain_tip().unwrap(); + let b = get_burnchain(path, pox_consts.clone()); + let b_blind = get_burnchain(path_blinded, pox_consts.clone()); eprintln!("Making block {}", ix); let (op, block) = if ix == 0 { @@ -2839,169 +3006,1104 @@ fn test_pox_processable_block_in_different_pox_forks() { } else { stacks_blocks[ix - 1].1.header.block_hash() }; - make_stacks_block( - &sort_db, - &mut chainstate, - &b, - &parent, - burnchain_tip.block_height, - miner, - 10000, - vrf_key, - ix as u32, - ) + if ix < 10 { + make_stacks_block( + &sort_db, + &mut chainstate, + &b, + &parent, + burnchain_tip.block_height, + miner, + 10000, + vrf_key, + ix as u32, + ) + } else { + make_stacks_block( + &sort_db_blind, + &mut chainstate_blind, + &b_blind, + &parent, + burnchain_tip_blind.block_height, + miner, + 10000, + vrf_key, + ix as u32, + ) + } }; produce_burn_block( + &b, 
&mut burnchain, &burnchain_tip.block_hash, vec![op], - [burnchain_blinded].iter_mut(), + [burnchain_blind].iter_mut(), ); - // handle the sortition - coord.handle_new_burnchain_block().unwrap(); - coord_blind.handle_new_burnchain_block().unwrap(); - let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); - if b.is_reward_cycle_start(new_burnchain_tip.block_height) { - eprintln!( - "Reward cycle start at height={}", - new_burnchain_tip.block_height - ); - // the "blinded" sortition db and the one that's processed all the blocks - // should have diverged in sortition_ids now... - sortition_ids_diverged = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - eprintln!( - "Anchor block={}, selected at height={}", - &bhh, - SortitionDB::get_block_snapshot_for_winning_stacks_block( - &sort_db.index_conn(), - &ic.context.chain_tip, - &bhh - ) - .unwrap() - .unwrap() - .block_height - ); - anchor_blocks.push(bhh); + // handle the sortition -- if + loop { + let missing_anchor_opt = coord.handle_new_burnchain_block().unwrap(); + if let Some(missing_anchor) = missing_anchor_opt { + eprintln!( + "Unblinded database reports missing anchor block {:?} (ix={})", + &missing_anchor, ix + ); + for (_, blk) in stacks_blocks.iter() { + if blk.block_hash() == missing_anchor { + let ic = sort_db.index_conn(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let sn = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &blk.block_hash(), + ) + .unwrap() + .unwrap(); + + // feed this missing reward cycle data + let rc = b_blind + .block_height_to_reward_cycle(sn.block_height) + .unwrap(); + let start_height = b_blind.reward_cycle_to_block_height(rc); + for height in start_height..sn.block_height { + let asn = + SortitionDB::get_ancestor_snapshot(&ic, height, &tip.sortition_id) + 
.unwrap() + .unwrap(); + for (_, blk) in stacks_blocks.iter() { + if blk.block_hash() == asn.winning_stacks_block_hash { + eprintln!("Unblinded database accepts missing anchor block ancestor {} of {} (ix={})", &blk.block_hash(), &missing_anchor, ix); + preprocess_block(&mut chainstate, &sort_db, &asn, blk.clone()); + coord.handle_new_stacks_block().unwrap(); + break; + } + } + } + + // *now* process this anchor block + eprintln!( + "Unblinded database processes missing anchor block {} (ix={})", + &missing_anchor, ix + ); + preprocess_block(&mut chainstate, &sort_db, &sn, blk.clone()); + coord.handle_new_stacks_block().unwrap(); + break; + } + } + } else { + coord.handle_new_stacks_block().unwrap(); + break; + } } + coord_blind.handle_new_burnchain_block().unwrap(); + coord_blind.handle_new_stacks_block().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let blinded_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); - if sortition_ids_diverged { - assert_ne!( - tip.sortition_id, blinded_tip.sortition_id, - "Sortitions should have diverged by block height = {}", - blinded_tip.block_height - ); - } else { - assert_eq!( - tip.sortition_id, blinded_tip.sortition_id, - "Sortitions should not have diverged at block height = {}", - blinded_tip.block_height + + if ix < 10 { + // load the block into staging and process it on the un-blinded sortition DB + let block_hash = block.header.block_hash(); + eprintln!( + "Block hash={}, parent={}, height={}, ix={} (not blind)", + &block_hash, &block.header.parent_block, block.header.total_work.work, ix ); - } - // load the block into staging - let block_hash = block.header.block_hash(); - eprintln!("Block hash={}, ix={}", &block_hash, ix); + assert_eq!(&tip.winning_stacks_block_hash, &block_hash); + stacks_blocks.push((tip.sortition_id.clone(), block.clone())); - assert_eq!(&tip.winning_stacks_block_hash, &block_hash); - stacks_blocks.push((tip.sortition_id.clone(), 
block.clone())); + preprocess_block(&mut chainstate, &sort_db, &tip, block.clone()); - preprocess_block(&mut chainstate, &sort_db, &tip, block); + // handle the stacks block + coord.handle_new_stacks_block().unwrap(); + } + if ix == 0 || ix >= 10 { + // load the block into staging and process it on the blinded sortition DB + let block_hash = block.header.block_hash(); + eprintln!( + "Block hash={}, parent={}, height={}, ix={} (blind)", + &block_hash, &block.header.parent_block, block.header.total_work.work, ix + ); - // handle the stacks block - coord.handle_new_stacks_block().unwrap(); + assert_eq!(&blinded_tip.winning_stacks_block_hash, &block_hash); + if ix != 0 { + stacks_blocks.push((blinded_tip.sortition_id.clone(), block.clone())); + } + + preprocess_block(&mut chainstate_blind, &sort_db_blind, &blinded_tip, block); + + // handle the stacks block + coord_blind.handle_new_stacks_block().unwrap(); + } + if ix == 18 { + // right at the end of reward cycle 3 -- feed in the blocks from the blinded DB into + // the unblinded DB + for (i, (_, block)) in stacks_blocks.iter().enumerate() { + if i >= 10 && i <= ix { + eprintln!("Mirror blocks from blinded DB to unblinded DB (simulates downloading them) i={}", i); + let ic = sort_db_blind.index_conn(); + let sn = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); + preprocess_block(&mut chainstate, &sort_db, &sn, block.clone()); + let _ = coord.handle_new_stacks_block(); + } + } + } + if ix > 18 { + // starting in reward cycle 4 -- this should NOT panic + eprintln!("Mirror block {} to unblinded DB", ix); + preprocess_block(&mut chainstate, &sort_db, &tip, stacks_blocks[ix].1.clone()); + let _ = coord.handle_new_stacks_block(); + } } + // both the blinded and unblined chains should now have the same view let block_height = eval_at_chain_tip(path, &sort_db, "block-height"); - assert_eq!(block_height, Value::UInt(10)); + 
assert_eq!(block_height, Value::UInt(11)); let block_height = eval_at_chain_tip(path_blinded, &sort_db_blind, "block-height"); - assert_eq!(block_height, Value::UInt(0)); - - { - let ic = sort_db.index_handle_at_tip(); - let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "111"); - } + assert_eq!(block_height, Value::UInt(11)); + // because of the affirmations, the canonical PoX ID deliberately omits anchor blocks { let ic = sort_db_blind.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "100"); + assert_eq!(&pox_id.to_string(), "110011"); } - - // now, we reveal `0` to the blinded coordinator - - reveal_block( - path_blinded, - &sort_db_blind, - &mut coord_blind, - &stacks_blocks[0].0, - &stacks_blocks[0].1, - ); - - // after revealing ``0``, we should now have the anchor block for - // the first reward cycle after the initial one - { - let ic = sort_db_blind.index_handle_at_tip(); + let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "110"); + assert_eq!(&pox_id.to_string(), "110011"); } - // now, the blinded node should be able to process blocks 10 and 11 - // 10 will process fine, because its parent has consensus hash = - // INITIAL_CONSENSUS_HASH - // 11 will NOT process fine, even though it _should_, because its parents - // consensus hash is different than the consensus hash of the parent when it was mined + // same canonical Stacks chain tip + let stacks_tip = SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db.conn()).unwrap(); + let stacks_tip_blind = + SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db_blind.conn()).unwrap(); + assert_eq!(stacks_tip, stacks_tip_blind); - let sort_id = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &sort_db_blind.index_conn(), - &SortitionDB::get_canonical_sortition_tip(sort_db_blind.conn()).unwrap(), - &stacks_blocks[10].1.block_hash(), - ) - .unwrap() - .unwrap() - 
.sortition_id; + // same final consensus hash, at the start of height 20 + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let blinded_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); - reveal_block( - path_blinded, - &sort_db_blind, - &mut coord_blind, - &sort_id, - &stacks_blocks[10].1, + assert!(tip.sortition); + assert!(blinded_tip.sortition); + assert_eq!( + tip.winning_stacks_block_hash, + blinded_tip.winning_stacks_block_hash ); + assert_eq!(tip.burn_header_hash, blinded_tip.burn_header_hash); + assert_eq!(tip.consensus_hash, blinded_tip.consensus_hash); + assert_eq!(tip.block_height, 21); + assert_eq!(blinded_tip.block_height, 21); +} - let block_height = eval_at_chain_tip(path_blinded, &sort_db_blind, "block-height"); - assert_eq!(block_height, Value::UInt(2)); - eprintln!("Processed block 10 okay!"); - - // won't successfully process the block - let sort_id = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &sort_db_blind.index_conn(), - &SortitionDB::get_canonical_sortition_tip(sort_db_blind.conn()).unwrap(), - &stacks_blocks[11].1.block_hash(), - ) - .unwrap() - .unwrap() - .sortition_id; +fn highest_block(sortitions: &HashMap) -> BlockSnapshot { + let mut max_k = 0; + for (k, _) in sortitions.iter() { + if *k > max_k { + max_k = *k; + } + } + sortitions.get(&max_k).clone().unwrap().to_owned() +} - reveal_block( - path_blinded, - &sort_db_blind, - &mut coord_blind, - &sort_id, - &stacks_blocks[11].1, +fn replay_reward_cycle<'a>( + stacks_blocks: &Vec<(SortitionId, StacksBlock, u64)>, + b: &Burnchain, + coord: &mut ChainsCoordinator<'a, NullEventDispatcher, (), OnChainRewardSetProvider>, + sort_db: &SortitionDB, + chainstate: &mut StacksChainState, +) { + for _ in 0..b.pox_constants.reward_cycle_length { + let missing_anchor_opt = coord.handle_new_burnchain_block().unwrap(); + if let Some(missing_anchor) = missing_anchor_opt { + eprintln!( + "Database {} reports missing anchor 
block {:?}", + &b.working_dir, &missing_anchor + ); + for (_, blk, ..) in stacks_blocks.iter() { + if blk.block_hash() == missing_anchor { + let ic = sort_db.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let sn = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &blk.block_hash(), + ) + .unwrap() + .unwrap(); + + // feed this missing reward cycle data + let rc = b.block_height_to_reward_cycle(sn.block_height).unwrap(); + for height in 0..sn.block_height { + let asn = + SortitionDB::get_ancestor_snapshot(&ic, height, &tip.sortition_id) + .unwrap() + .unwrap(); + for (_, blk, ..) in stacks_blocks.iter() { + if blk.block_hash() == asn.winning_stacks_block_hash { + eprintln!( + "Database {} accepts missing anchor block ancestor {} of {}", + &b.working_dir, + &blk.block_hash(), + &missing_anchor + ); + preprocess_block(chainstate, sort_db, &asn, blk.clone()); + coord.handle_new_stacks_block().unwrap(); + break; + } + } + } + + // *now* process this anchor block + eprintln!( + "Database {} processes missing anchor block {}", + &b.working_dir, &missing_anchor + ); + preprocess_block(chainstate, sort_db, &sn, blk.clone()); + coord.handle_new_stacks_block().unwrap(); + break; + } + } + } else { + coord.handle_new_stacks_block().unwrap(); + break; + } + } +} + +fn load_sortitions(sort_db: &SortitionDB) -> HashMap { + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let ic = sort_db.index_conn(); + let mut ret = HashMap::new(); + for height in 0..(tip.block_height + 1) { + if let Some(sn) = + SortitionDB::get_ancestor_snapshot(&ic, height, &tip.sortition_id).unwrap() + { + ret.insert(sn.block_height, sn); + } + } + ret +} + +#[test] +fn test_pox_affirmation_fork_duel() { + let path = "/tmp/stacks-blockchain.test.pox_affirmation_fork_duel"; + // setup a second set of states that won't see the broadcasted blocks + let path_blinded = 
"/tmp/stacks-blockchain.test.pox_affirmation_fork_duel.blinded"; + let _r = std::fs::remove_dir_all(path); + let _r = std::fs::remove_dir_all(path_blinded); + + let pox_consts = Some(PoxConstants::new( + 5, + 2, + 2, + 25, + 5, + u64::max_value(), + u64::max_value(), + )); + let b = get_burnchain(path, pox_consts.clone()); + let b_blind = get_burnchain(path_blinded, pox_consts.clone()); + + let vrf_keys: Vec<_> = (0..29).map(|_| VRFPrivateKey::new()).collect(); + let committers: Vec<_> = (0..29).map(|_| StacksPrivateKey::new()).collect(); + + setup_states_2_1( + &[path, path_blinded], + &vrf_keys, + &committers, + pox_consts.clone(), + None, ); - let block_height = eval_at_chain_tip(path_blinded, &sort_db_blind, "block-height"); - assert_eq!(block_height, Value::UInt(3)); - eprintln!("Processed block 11 okay!"); + let mut coord = make_coordinator(path, Some(b)); + let mut coord_blind = make_coordinator(path_blinded, Some(b_blind)); + + coord.handle_new_burnchain_block().unwrap(); + coord_blind.handle_new_burnchain_block().unwrap(); + + let sort_db = get_sortition_db(path, pox_consts.clone()); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + assert_eq!(tip.block_height, 1); + assert_eq!(tip.sortition, false); + let (_, ops) = sort_db + .get_sortition_result(&tip.sortition_id) + .unwrap() + .unwrap(); + + let sort_db_blind = get_sortition_db(path_blinded, pox_consts.clone()); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + assert_eq!(tip.block_height, 1); + assert_eq!(tip.sortition, false); + let (_, ops) = sort_db_blind + .get_sortition_result(&tip.sortition_id) + .unwrap() + .unwrap(); + + // we should have all the VRF registrations accepted + assert_eq!(ops.accepted_ops.len(), vrf_keys.len()); + assert_eq!(ops.consumed_leader_keys.len(), 0); + + // process sequential blocks, and their sortitions, and burn block heights + let mut stacks_blocks: Vec<(SortitionId, StacksBlock, u64)> = 
vec![]; + + let mut snapshots: HashMap = HashMap::new(); + let mut snapshots_blind: HashMap = HashMap::new(); + + let mut fork_num = 0; + let mut fork_start = 0; + let mut sortition_fork_start = 0; + + // setup: + // sort:1 6 11 16 21 + // |----- rc 0 --------|------- rc 1 --------|----- rc 2 ------------|-------- rc 3 ----------|----- rc 4 + // ix: X - 0 - 1 - 2 - 3 - 4 ------------------- 10 - 11 - 12 - 13 - 14 ------------------------- 20 - 21 + // \________________ 5 _ 6 _ 7 _ 8 _ 9 ________________________ 15 _ 16 _ 17 _ 18 _ 19 + // + // 2 - 3 - 4 ------------------- 10 - 11 - 12 - 13 - 14 - 15 ------------------------ 21 - 22 - 23 - 24 - 25 + // \________ 5 _ 6 _ 7 _ 8 _ 9 _____________________________ 16 _ 17 _ 18 _ 19 _ 20 ________________________ 26 _ 27 _ 28 _ 29 + for (ix, (vrf_key, miner)) in vrf_keys.iter().zip(committers.iter()).enumerate() { + let mut burnchain = get_burnchain_db(path, pox_consts.clone()); + let burnchain_blind = get_burnchain_db(path_blinded, pox_consts.clone()); + let mut chainstate = get_chainstate(path); + let mut chainstate_blind = get_chainstate(path_blinded); + let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); + let burnchain_tip_blind = burnchain_blind.get_canonical_chain_tip().unwrap(); + let b = get_burnchain(path, pox_consts.clone()); + let b_blind = get_burnchain(path_blinded, pox_consts.clone()); + + let prev_reward_cycle = b + .block_height_to_reward_cycle(burnchain_tip.block_height.saturating_sub(1)) + .unwrap_or(0); + let cur_reward_cycle = b + .block_height_to_reward_cycle(burnchain_tip.block_height) + .unwrap_or(0); + let next_reward_cycle = b + .block_height_to_reward_cycle(burnchain_tip.block_height + 1) + .unwrap_or(0); + let will_rc_start = cur_reward_cycle != next_reward_cycle; + + let burn_block = burnchain_tip.block_height; + + let (op, block) = if ix == 0 { + make_genesis_block( + &sort_db, + &mut chainstate, + &BlockHeaderHash([0; 32]), + miner, + 10000, + vrf_key, + ix as u32, + ) + } else 
{ + let (parent, parent_sortition) = if burn_block == 5 { + fork_num += 1; + fork_start = stacks_blocks.len() - 1; + sortition_fork_start = highest_block(&snapshots).block_height; + let p = stacks_blocks[0].1.header.block_hash(); + eprintln!("snapshots = {:?}", &snapshots); + (p, snapshots.get(&2).unwrap().clone()) + } else if burn_block == 10 { + fork_num += 1; + let p = stacks_blocks[fork_start - 1].1.header.block_hash(); + let sn = snapshots_blind + .get(&(sortition_fork_start - 1)) + .unwrap() + .clone(); + fork_start = stacks_blocks.len() - 2; + sortition_fork_start = highest_block(&snapshots_blind).block_height; + (p, sn) + } else if burn_block == 16 || burn_block == 21 || burn_block == 26 { + fork_num += 1; + let p = stacks_blocks[fork_start].1.header.block_hash(); + let parent_burn_height = if burn_block == 16 { 9 } else { burn_block - 6 }; + + let sn = if fork_num % 2 == 1 { + let sn = snapshots_blind.get(&parent_burn_height).unwrap().clone(); + sn + } else { + let sn = snapshots.get(&parent_burn_height).unwrap().clone(); + sn + }; + + fork_start = stacks_blocks.len() - 2; + (p, sn) + } else { + let p = stacks_blocks[ix - 1].1.header.block_hash(); + let sn = if fork_num % 2 == 1 { + highest_block(&snapshots_blind) + } else { + highest_block(&snapshots) + }; + (p, sn) + }; + + if fork_num % 2 == 0 { + eprintln!("Making block {} off {} burn block {} parent {} {}, prev rc = {}, cur rc = {}, next rc = {}, will_rc_start? 
{} fork_num = {}, fork_start = {}, (not blind)", + ix, &parent, burn_block, parent_sortition.block_height, &parent_sortition.consensus_hash, prev_reward_cycle, cur_reward_cycle, next_reward_cycle, will_rc_start, fork_num, fork_start); + make_stacks_block_from_parent_sortition( + &sort_db, + &mut chainstate, + &b, + &parent, + parent_sortition.block_height, + miner, + 10000, + vrf_key, + ix as u32, + parent_sortition, + ) + } else { + eprintln!("Making block {} off {} burn block {} parent {} {}, prev rc = {}, cur rc = {}, next rc = {}, will_rc_start? {}, fork_num = {}, fork_start = {} (blind)", + ix, &parent, burn_block, parent_sortition.block_height, &parent_sortition.consensus_hash, prev_reward_cycle, cur_reward_cycle, next_reward_cycle, will_rc_start, fork_num, fork_start); + make_stacks_block_from_parent_sortition( + &sort_db_blind, + &mut chainstate_blind, + &b_blind, + &parent, + parent_sortition.block_height, + miner, + 10000, + vrf_key, + ix as u32, + parent_sortition, + ) + } + }; + + eprintln!("Made block {} {}, burn block {}, prev rc = {}, cur rc = {}, next rc = {}, will_rc_start? {}", ix, block.block_hash(), burnchain_tip.block_height, prev_reward_cycle, cur_reward_cycle, next_reward_cycle, will_rc_start); + produce_burn_block( + &b, + &mut burnchain, + &burnchain_tip.block_hash, + vec![op], + [burnchain_blind].iter_mut(), + ); + + if burn_block == 14 { + // blinded DB has NOT undergone a PoX reorg yet -- it believes that the anchor blocks for + // reward cycles 1 and 2 have valid snapshots, but it's simply missing the anchor block + // for rc 1. 
+ let burnchain_blind = get_burnchain_db(path_blinded, pox_consts.clone()); + + let (rc1_bc, rc1_bc_md) = + BurnchainDB::get_anchor_block_commit(burnchain_blind.conn(), 1) + .unwrap() + .unwrap(); + let (rc2_bc, rc2_bc_md) = + BurnchainDB::get_anchor_block_commit(burnchain_blind.conn(), 2) + .unwrap() + .unwrap(); + + let tip_blind = + SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + let ic = sort_db_blind.index_conn(); + let handle = ic.as_handle(&tip_blind.sortition_id); + + let rc1_sn_before = handle + .get_block_snapshot(&rc1_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + let rc2_sn_before = handle + .get_block_snapshot(&rc2_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + assert!(rc1_sn_before.pox_valid); + assert!(rc2_sn_before.pox_valid); + + eprintln!("Handle new burnchain block {} (not blind)", burn_block); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + + eprintln!("Handle new burnchain block {} (blind)", burn_block); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut chainstate_blind, + ); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut chainstate_blind, + ); + + // PoX reorg has happened -- the blinded DB learned about the anchor block for rc2 + let tip_blind = + SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + + test_debug!("Blind and unblind chain tips should match"); + assert_eq!(tip.block_height, tip_blind.block_height); + assert_eq!(tip.consensus_hash, tip_blind.consensus_hash); + + // anchor blocks still on the same consensus history + let tip_blind = + SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + let ic = sort_db_blind.index_conn(); + let handle = 
ic.as_handle(&tip_blind.sortition_id); + + let rc1_sn = handle + .get_block_snapshot(&rc1_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + let rc2_sn = handle + .get_block_snapshot(&rc2_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + + assert!(rc1_sn_before.pox_valid); + assert!(rc2_sn_before.pox_valid); + + assert_eq!(rc1_sn_before.consensus_hash, rc1_sn.consensus_hash); + assert_ne!(rc2_sn_before.consensus_hash, rc2_sn.consensus_hash); // different + + let rc2_sn_invalid = SortitionDB::get_block_snapshot_consensus( + sort_db_blind.conn(), + &rc2_sn_before.consensus_hash, + ) + .unwrap() + .unwrap(); + assert_eq!(rc2_sn_invalid.consensus_hash, rc2_sn_before.consensus_hash); + assert!(!rc2_sn_invalid.pox_valid); + + // blinded DB's affirmation map is consistent with unblinded DB + let heaviest_am = coord_blind.get_canonical_affirmation_map().unwrap(); + assert_eq!(heaviest_am, AffirmationMap::decode("paa").unwrap()); + } else if burn_block == 15 { + // in order to keep mining on the blinded chainstate, then we'll need to make it so + // the blinded DB wants to confirm the anchor block for rc 2. But, it no longer believes + // that rc 2's anchor block is canonical. We need to override this and force it to re-process. 
+ let mut burnchain_blind = get_burnchain_db(path_blinded, pox_consts.clone()); + + let (rc1_bc, rc1_bc_md) = + BurnchainDB::get_anchor_block_commit(burnchain_blind.conn(), 1) + .unwrap() + .unwrap(); + let (rc2_bc, rc2_bc_md) = + BurnchainDB::get_anchor_block_commit(burnchain_blind.conn(), 2) + .unwrap() + .unwrap(); + let (rc3_bc, rc3_bc_md) = + BurnchainDB::get_anchor_block_commit(burnchain_blind.conn(), 3) + .unwrap() + .unwrap(); + + let tip_blind = + SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + let ic = sort_db_blind.index_conn(); + let handle = ic.as_handle(&tip_blind.sortition_id); + + let rc1_sn_before = handle + .get_block_snapshot(&rc1_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + let rc2_sn_before = handle + .get_block_snapshot(&rc2_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + let rc3_sn_before = handle + .get_block_snapshot(&rc3_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + + // force the DB to "revert" back to assuming + let tx = burnchain_blind.tx_begin().unwrap(); + tx.set_override_affirmation_map(1, AffirmationMap::decode("").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(2, AffirmationMap::decode("a").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(3, AffirmationMap::decode("ap").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(4, AffirmationMap::decode("apa").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(5, AffirmationMap::decode("apap").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(6, AffirmationMap::decode("apapa").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(7, AffirmationMap::decode("apapap").unwrap()) + .unwrap(); + tx.commit().unwrap(); + + eprintln!("Replay rc 0 for blinded db"); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut chainstate_blind, + ); + eprintln!("Replay rc 1 for blinded db"); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut 
chainstate_blind, + ); + eprintln!("Replay rc 2 for blinded db"); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut chainstate_blind, + ); + + snapshots_blind = load_sortitions(&sort_db_blind); + + // blinded DB has undergone a PoX reorg that "reverts" it -- it now believes that rc2's + // anchor block exists, but not rc1's or rc3's. + let tip_blind = + SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + let ic = sort_db_blind.index_conn(); + let handle = ic.as_handle(&tip_blind.sortition_id); + + test_debug!( + "Test anchor block-commit for rc1 {} in {}", + &rc1_bc.block_header_hash, + &rc1_bc_md.burn_block_hash + ); + let rc1_sn = handle + .get_block_snapshot(&rc1_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + assert_eq!(rc1_sn.consensus_hash, rc1_sn_before.consensus_hash); + + test_debug!( + "Test anchor block-commit for rc2 {} in {}", + &rc2_bc.block_header_hash, + &rc2_bc_md.burn_block_hash + ); + let rc2_sn = handle + .get_block_snapshot(&rc2_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + assert_ne!(rc2_sn.consensus_hash, rc2_sn_before.consensus_hash); + + test_debug!( + "Test anchor block-commit for rc3 {} in {}", + &rc3_bc.block_header_hash, + &rc3_bc_md.burn_block_hash + ); + let rc3_sn = handle + .get_block_snapshot(&rc3_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + assert_ne!(rc3_sn.consensus_hash, rc3_sn_before.consensus_hash); + + // old rc2 anchor block consensus hash isn't valid anymore + let rc2_sn_invalid = SortitionDB::get_block_snapshot_consensus( + sort_db_blind.conn(), + &rc2_sn_before.consensus_hash, + ) + .unwrap() + .unwrap(); + assert!(!rc2_sn_invalid.pox_valid); + + // blinded DB's affirmation map is consistent with its original history + let heaviest_am = coord_blind.get_canonical_affirmation_map().unwrap(); + assert_eq!(heaviest_am, AffirmationMap::decode("apa").unwrap()); + } else if burn_block == 19 { + // merge both chains' views + let tip = 
SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let tip_blind = + SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + + assert_ne!(tip.consensus_hash, tip_blind.consensus_hash); + + eprintln!("Handle new burnchain block {} (not blind)", burn_block); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + + eprintln!("Handle new burnchain block {} (blind)", burn_block); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut chainstate_blind, + ); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut chainstate_blind, + ); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut chainstate_blind, + ); + replay_reward_cycle( + &stacks_blocks, + &b_blind, + &mut coord_blind, + &sort_db_blind, + &mut chainstate_blind, + ); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let tip_blind = + SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + assert_eq!(tip.consensus_hash, tip_blind.consensus_hash); + + // the unblinded DB's affirmation map is consistent with the blinded DB's history + let heaviest_am = coord.get_canonical_affirmation_map().unwrap(); + assert_eq!(heaviest_am, AffirmationMap::decode("apaa").unwrap()); + } else if burn_block == 20 { + // the unblinded DB wants to confirm the anchor block for rc 1, but it no longer believes + // that rc 1 is canonical. We need to override this and force it to re-process. 
+ let mut burnchain = get_burnchain_db(path, pox_consts.clone()); + + let (rc1_bc, rc1_bc_md) = BurnchainDB::get_anchor_block_commit(burnchain.conn(), 1) + .unwrap() + .unwrap(); + let (rc2_bc, rc2_bc_md) = BurnchainDB::get_anchor_block_commit(burnchain.conn(), 2) + .unwrap() + .unwrap(); + let (rc3_bc, rc3_bc_md) = BurnchainDB::get_anchor_block_commit(burnchain.conn(), 3) + .unwrap() + .unwrap(); + let (rc4_bc, rc4_bc_md) = BurnchainDB::get_anchor_block_commit(burnchain.conn(), 4) + .unwrap() + .unwrap(); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let ic = sort_db.index_conn(); + let handle = ic.as_handle(&tip.sortition_id); + + let rc1_sn_before = handle + .get_block_snapshot(&rc1_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + let rc2_sn_before = handle + .get_block_snapshot(&rc2_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + let rc3_sn_before = handle + .get_block_snapshot(&rc3_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + let rc4_sn_before = handle + .get_block_snapshot(&rc4_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + + let tx = burnchain.tx_begin().unwrap(); + tx.set_override_affirmation_map(1, AffirmationMap::decode("").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(2, AffirmationMap::decode("p").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(3, AffirmationMap::decode("pa").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(4, AffirmationMap::decode("pap").unwrap()) + .unwrap(); + tx.set_override_affirmation_map(5, AffirmationMap::decode("papa").unwrap()) + .unwrap(); + tx.commit().unwrap(); + + eprintln!("Replay rc 0 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 1 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 2 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + 
eprintln!("Replay rc 3 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 4 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + + snapshots = load_sortitions(&sort_db); + + // unblinded DB has undergone a PoX reorg that "reverts" it -- it now believes that rc1's + // anchor block exists as well as rc3's, but not rc2's or rc4's. + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let ic = sort_db.index_conn(); + let handle = ic.as_handle(&tip.sortition_id); + + test_debug!( + "Test anchor block-commit for rc1 {} in {}", + &rc1_bc.block_header_hash, + &rc1_bc_md.burn_block_hash + ); + let rc1_sn = handle + .get_block_snapshot(&rc1_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + assert_eq!(rc1_sn.consensus_hash, rc1_sn_before.consensus_hash); + + test_debug!( + "Test anchor block-commit for rc2 {} in {}", + &rc2_bc.block_header_hash, + &rc2_bc_md.burn_block_hash + ); + let rc2_sn = handle + .get_block_snapshot(&rc2_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + assert_ne!(rc2_sn.consensus_hash, rc2_sn_before.consensus_hash); + + test_debug!( + "Test anchor block-commit for rc3 {} in {}", + &rc3_bc.block_header_hash, + &rc3_bc_md.burn_block_hash + ); + let rc3_sn = handle + .get_block_snapshot(&rc3_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + assert_ne!(rc3_sn.consensus_hash, rc3_sn_before.consensus_hash); + + test_debug!( + "Test anchor block-commit for rc4 {} in {}", + &rc4_bc.block_header_hash, + &rc4_bc_md.burn_block_hash + ); + let rc4_sn = handle + .get_block_snapshot(&rc4_bc_md.burn_block_hash) + .unwrap() + .unwrap(); + assert_ne!(rc4_sn.consensus_hash, rc4_sn_before.consensus_hash); + + // old rc2 anchor block consensus hash isn't valid anymore + let rc2_sn_invalid = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &rc2_sn_before.consensus_hash, + ) + .unwrap() + .unwrap(); + 
assert!(!rc2_sn_invalid.pox_valid); + + // old rc4 anchor block consensus hash isn't valid anymore + let rc4_sn_invalid = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &rc4_sn_before.consensus_hash, + ) + .unwrap() + .unwrap(); + assert!(!rc4_sn_invalid.pox_valid); + + // blinded DB's affirmation map is consistent with its original history + let heaviest_am = coord.get_canonical_affirmation_map().unwrap(); + assert_eq!(heaviest_am, AffirmationMap::decode("papa").unwrap()); + } else if burn_block == 29 { + let tx = burnchain.tx_begin().unwrap(); + tx.clear_override_affirmation_map(1).unwrap(); + tx.clear_override_affirmation_map(2).unwrap(); + tx.clear_override_affirmation_map(3).unwrap(); + tx.clear_override_affirmation_map(4).unwrap(); + tx.clear_override_affirmation_map(5).unwrap(); + tx.clear_override_affirmation_map(6).unwrap(); + tx.clear_override_affirmation_map(7).unwrap(); + tx.commit().unwrap(); + + // unblinded chain syncs with blinded chain + eprintln!("Handle new burnchain block {} (not blind)", burn_block); + + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 0 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 1 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 2 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 3 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 4 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 5 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, &sort_db, &mut chainstate); + eprintln!("Replay rc 6 for unblinded db"); + replay_reward_cycle(&stacks_blocks, &b, &mut coord, 
&sort_db, &mut chainstate); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + for burn_height in (burn_block - 10)..burn_block { + let ancestor = SortitionDB::get_ancestor_snapshot( + &sort_db.index_conn(), + burn_height, + &tip.sortition_id, + ) + .unwrap() + .unwrap(); + for (_, blk, bh) in stacks_blocks.iter() { + if ancestor.sortition + && ancestor.winning_stacks_block_hash == blk.block_hash() + && *bh == burn_height + { + eprintln!( + "Replay {} from {} (not blind)", + burn_height, + &blk.block_hash() + ); + preprocess_block(&mut chainstate, &sort_db, &ancestor, blk.clone()); + coord.handle_new_stacks_block().unwrap(); + } + } + } + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let blinded_tip = + SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + + let tip_at_29 = + SortitionDB::get_ancestor_snapshot(&sort_db.index_conn(), 29, &tip.sortition_id) + .unwrap() + .unwrap(); + let blinded_tip_at_29 = SortitionDB::get_ancestor_snapshot( + &sort_db_blind.index_conn(), + 29, + &blinded_tip.sortition_id, + ) + .unwrap() + .unwrap(); + + assert_eq!(tip_at_29.consensus_hash, blinded_tip_at_29.consensus_hash); + } else { + eprintln!("Handle new burnchain block {} (not blind)", burn_block); + for _ in 0..(2 * b.pox_constants.reward_cycle_length + 1) { + coord.handle_new_burnchain_block().unwrap(); + coord.handle_new_stacks_block().unwrap(); + } + + eprintln!("Handle new burnchain block {} (blind)", burn_block); + for _ in 0..(2 * b.pox_constants.reward_cycle_length + 1) { + coord_blind.handle_new_burnchain_block().unwrap(); + coord_blind.handle_new_stacks_block().unwrap(); + } + } + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let blinded_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + + snapshots.insert(tip.block_height, tip.clone()); + snapshots_blind.insert(blinded_tip.block_height, blinded_tip.clone()); + 
+ if fork_num % 2 == 0 { + // load the block into staging and process it on the un-blinded sortition DB + let block_hash = block.header.block_hash(); + eprintln!("Block hash={}, parent={}, height={}, burn_block={}, sortition_tip={}, ix={} (not blind)", &block_hash, &block.header.parent_block, block.header.total_work.work, burn_block, &tip.sortition_id, ix); + + stacks_blocks.push((tip.sortition_id.clone(), block.clone(), tip.block_height)); + + if tip.winning_stacks_block_hash == block_hash { + eprintln!("Preprocess {} (not blind)", &block_hash); + preprocess_block(&mut chainstate, &sort_db, &tip, block.clone()); + } else { + eprintln!("Will NOT preprocess {} (not blind)", &block_hash); + } + + // handle the stacks block + coord.handle_new_stacks_block().unwrap(); + } + if ix == 0 || (fork_num % 2) == 1 { + // load the block into staging and process it on the blinded sortition DB + let block_hash = block.header.block_hash(); + eprintln!("Block hash={}, parent={}, height={}, burn_block={}, sortition_tip={}, ix={} (blind)", &block_hash, &block.header.parent_block, block.header.total_work.work, burn_block, &blinded_tip.sortition_id, ix); + + if ix != 0 { + stacks_blocks.push(( + blinded_tip.sortition_id.clone(), + block.clone(), + blinded_tip.block_height, + )); + } + + if blinded_tip.winning_stacks_block_hash == block_hash { + eprintln!("Preprocess {} (blind)", &block_hash); + preprocess_block(&mut chainstate_blind, &sort_db_blind, &blinded_tip, block); + } else { + eprintln!("Will NOT preprocess {} (blind)", &block_hash); + } + + // handle the stacks block + coord_blind.handle_new_stacks_block().unwrap(); + } + } + + // unblinded chain goes to burn block 30; blinded chain goes to burn block 29. + // But, they share the same history. 
+ let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let blinded_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db_blind.conn()).unwrap(); + + assert_eq!(tip.block_height, 30); + assert_eq!(blinded_tip.block_height, 29); + + let tip_29 = SortitionDB::get_ancestor_snapshot( + &sort_db.index_conn(), + blinded_tip.block_height, + &tip.sortition_id, + ) + .unwrap() + .unwrap(); + assert_eq!( + tip_29.winning_stacks_block_hash, + blinded_tip.winning_stacks_block_hash + ); + assert_eq!(tip_29.burn_header_hash, blinded_tip.burn_header_hash); + assert_eq!(tip_29.consensus_hash, blinded_tip.consensus_hash); + + let heaviest_am = coord.get_canonical_affirmation_map().unwrap(); + assert_eq!(heaviest_am, AffirmationMap::decode("apapap").unwrap()); + + let block_height = eval_at_chain_tip(path, &sort_db, "block-height"); + assert_eq!(block_height, Value::UInt(11)); + + // the blinded burnchain DB is doing just fine -- it even has the next Stacks block! + let heaviest_am = coord_blind.get_canonical_affirmation_map().unwrap(); + assert_eq!(heaviest_am, AffirmationMap::decode("apapap").unwrap()); + + let block_height_blind = eval_at_chain_tip(path_blinded, &sort_db_blind, "block-height"); + assert_eq!(block_height_blind, Value::UInt(12)); + + let stacks_tip = SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db.conn()).unwrap(); + let stacks_tip_blind = + SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db_blind.conn()).unwrap(); + let stacks_tip_blind_11 = { + let mut chainstate = get_chainstate(path_blinded); + let mut tx = chainstate.index_tx_begin().unwrap(); + let hdr = StacksChainState::get_index_tip_ancestor( + &mut tx, + &StacksBlockHeader::make_index_block_hash(&stacks_tip_blind.0, &stacks_tip_blind.1), + block_height.expect_u128() as u64, + ) + .unwrap() + .unwrap(); + (hdr.consensus_hash, hdr.anchored_header.block_hash()) + }; + + assert_eq!(stacks_tip, stacks_tip_blind_11); + + // because of the affirmations, the canonical 
PoX ID deliberately omits anchor blocks + { + let ic = sort_db_blind.index_handle_at_tip(); + let pox_id = ic.get_pox_id().unwrap(); + assert_eq!(&pox_id.to_string(), "1101010"); + } + { + let ic = sort_db.index_handle_at_tip(); + let pox_id = ic.get_pox_id().unwrap(); + assert_eq!(&pox_id.to_string(), "1101010"); + } } #[test] @@ -3097,6 +4199,7 @@ fn test_pox_no_anchor_selected() { }; produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, vec![op], @@ -3304,6 +4407,7 @@ fn test_pox_fork_out_of_order() { ) }; produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, vec![op], @@ -3548,6 +4652,11 @@ fn test_pox_fork_out_of_order() { fn eval_at_chain_tip(chainstate_path: &str, sort_db: &SortitionDB, eval: &str) -> Value { let stacks_tip = SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db.conn()).unwrap(); + test_debug!( + "Canonical chain tip at {} is {:?}", + chainstate_path, + &stacks_tip + ); let mut chainstate = get_chainstate(chainstate_path); chainstate .with_read_only_clarity_tx( From 38c503920924f0411a42d9416ac3f4a9226d2e2b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:49:08 -0400 Subject: [PATCH 014/116] feat: add unit_test_2_1() to StacksEpoch for instantiating a list of epochs for Stacks 2.1, and remove some dead code --- src/core/mod.rs | 49 +++++++++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 23f1f86c3b..d2ab0c35cd 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -198,6 +198,27 @@ impl StacksEpoch { ] } + #[cfg(test)] + pub fn unit_test_2_1(first_burnchain_height: u64) -> Vec { + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: first_burnchain_height, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height, + end_height: 
STACKS_EPOCH_MAX, + }, + ] + } + pub fn all(first_burnchain_height: u64, epoch_2_1_block_height: u64) -> Vec { vec![ StacksEpoch { @@ -281,31 +302,3 @@ pub const STACKS_EPOCHS_REGTEST: &[StacksEpoch] = &[ }, ]; -/// Synchronize burn transactions from the Bitcoin blockchain -pub fn sync_burnchain_bitcoin( - working_dir: &String, - network_name: &String, -) -> Result { - use burnchains::bitcoin::indexer::BitcoinIndexer; - let channels = CoordinatorCommunication::instantiate(); - - let mut burnchain = - Burnchain::new(working_dir, &"bitcoin".to_string(), network_name).map_err(|e| { - error!( - "Failed to instantiate burn chain driver for {}: {:?}", - network_name, e - ); - e - })?; - - let new_height_res = burnchain.sync::(&channels.1, None, None); - let new_height = new_height_res.map_err(|e| { - error!( - "Failed to synchronize Bitcoin chain state for {} in {}", - network_name, working_dir - ); - e - })?; - - Ok(new_height) -} From f3f163d4ea0ff79dd5dc43c915c379f16ce3893f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:49:36 -0400 Subject: [PATCH 015/116] refactor: synchronize burnchain API with main.rs --- src/main.rs | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/src/main.rs b/src/main.rs index d752d39f79..4469968a45 100644 --- a/src/main.rs +++ b/src/main.rs @@ -758,14 +758,8 @@ simulating a miner. let indexer: BitcoinIndexer = burnchain.make_indexer().unwrap(); let (mut new_sortition_db, _) = burnchain.connect_db(&indexer, true).unwrap(); - let old_burnchaindb = BurnchainDB::connect( - &old_burnchaindb_path, - first_burnchain_block_height, - &first_burnchain_block_hash, - BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - true, - ) - .unwrap(); + let old_burnchaindb = + BurnchainDB::connect(&old_burnchaindb_path, &burnchain, true).unwrap(); let mut boot_data = ChainStateBootData { initial_balances, @@ -864,9 +858,11 @@ simulating a miner. 
let BurnchainBlockData { header: burn_block_header, ops: blockstack_txs, - } = old_burnchaindb - .get_burnchain_block(&old_snapshot.burn_header_hash) - .unwrap(); + } = BurnchainDB::get_burnchain_block( + &old_burnchaindb.conn(), + &old_snapshot.burn_header_hash, + ) + .unwrap(); if old_snapshot.parent_burn_header_hash == BurnchainHeaderHash::sentinel() { // skip initial snapshot -- it's a placeholder continue; From f4c82f0dcc636b1b17325848d62b76299370cee6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:49:51 -0400 Subject: [PATCH 016/116] refactor: synchronize network tests with burnchain --- src/net/mod.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/net/mod.rs b/src/net/mod.rs index b0dd4fd78c..e83b907f51 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -1889,6 +1889,7 @@ pub mod test { use address::*; use burnchains::bitcoin::address::*; + use burnchains::bitcoin::indexer::BitcoinIndexer; use burnchains::bitcoin::keys::*; use burnchains::bitcoin::*; use burnchains::burnchain::*; @@ -2332,9 +2333,7 @@ pub mod test { let _burnchain_blocks_db = BurnchainDB::connect( &config.burnchain.get_burnchaindb_path(), - first_burnchain_block_height, - &first_burnchain_block_hash, - 0, + &config.burnchain, true, ) .unwrap(); @@ -2742,8 +2741,15 @@ pub mod test { let mut burnchain_db = BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), true).unwrap(); + + let indexer: BitcoinIndexer = self.config.burnchain.make_indexer().unwrap(); burnchain_db - .raw_store_burnchain_block(block_header.clone(), blockstack_ops) + .raw_store_burnchain_block( + &self.config.burnchain, + &indexer, + block_header.clone(), + blockstack_ops, + ) .unwrap(); (block_header.block_height, block_header_hash) From 58eef6a3d28da706ad53a23490914d7e2be8b416 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:50:08 -0400 Subject: [PATCH 017/116] refactor: use a native sortition DB method for finding the parent 
sortition of a Stacks block --- src/net/relay.rs | 54 +++++++++++------------------------------------- 1 file changed, 12 insertions(+), 42 deletions(-) diff --git a/src/net/relay.rs b/src/net/relay.rs index c4df97f0ec..9e2c06f7e4 100644 --- a/src/net/relay.rs +++ b/src/net/relay.rs @@ -494,49 +494,19 @@ impl Relayer { block: &StacksBlock, download_time: u64, ) -> Result { - // find the snapshot of the parent of this block - let db_handle = SortitionHandleConn::open_reader_consensus(sort_ic, consensus_hash)?; - let parent_block_snapshot = match db_handle - .get_block_snapshot_of_parent_stacks_block(consensus_hash, &block.block_hash()) + if let Some(parent_block_snapshot) = + sort_ic.find_parent_snapshot_for_stacks_block(consensus_hash, &block.block_hash())? { - Ok(Some((_, sn))) => { - debug!( - "Parent of {}/{} is {}/{}", - consensus_hash, - block.block_hash(), - sn.consensus_hash, - sn.winning_stacks_block_hash - ); - sn - } - Ok(None) => { - debug!( - "Received block with unknown parent snapshot: {}/{}", - consensus_hash, - &block.block_hash() - ); - return Ok(false); - } - Err(db_error::InvalidPoxSortition) => { - warn!( - "Received block {}/{} on a non-canonical PoX sortition", - consensus_hash, - &block.block_hash() - ); - return Ok(false); - } - Err(e) => { - return Err(e.into()); - } - }; - - chainstate.preprocess_anchored_block( - sort_ic, - consensus_hash, - block, - &parent_block_snapshot.consensus_hash, - download_time, - ) + chainstate.preprocess_anchored_block( + sort_ic, + consensus_hash, + block, + &parent_block_snapshot.consensus_hash, + download_time, + ) + } else { + Ok(false) + } } /// Coalesce a set of microblocks into relayer hints and MicroblocksData messages, as calculated by From 1ce98707d3eb7fd419a53e775313a470fc7923e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:50:28 -0400 Subject: [PATCH 018/116] feat: add has_prefix() method to PoxId to see if a PoxId is a prefix of another --- src/types/chainstate.rs | 18 
++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/types/chainstate.rs b/src/types/chainstate.rs index 7e9a07e899..136276f8e6 100644 --- a/src/types/chainstate.rs +++ b/src/types/chainstate.rs @@ -113,6 +113,24 @@ impl PoxId { pub fn num_inventory_reward_cycles(&self) -> usize { self.0.len().saturating_sub(1) } + + pub fn has_prefix(&self, prefix: &PoxId) -> bool { + if self.len() < prefix.len() { + return false; + } + + for i in 0..prefix.len() { + if self.0[i] != prefix.0[i] { + return false; + } + } + + true + } + + pub fn into_inner(self) -> Vec { + self.0 + } } impl fmt::Display for PoxId { From 3a3288948182293d45b29e5bcc2a103e8813f7e4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:50:46 -0400 Subject: [PATCH 019/116] feat: FromRow implementation --- src/util/db.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/util/db.rs b/src/util/db.rs index 12bf7ce205..d3e37fe7f1 100644 --- a/src/util/db.rs +++ b/src/util/db.rs @@ -203,6 +203,13 @@ impl FromColumn for QualifiedContractIdentifier { } } +impl FromRow for bool { + fn from_row<'a>(row: &'a Row) -> Result { + let x: bool = row.get_unwrap(0); + Ok(x) + } +} + pub fn u64_to_sql(x: u64) -> Result { if x > (i64::max_value() as u64) { return Err(Error::ParseError); From 186f1f722585a8b383d3801c1f0a3927fb89d361 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:51:01 -0400 Subject: [PATCH 020/116] refactor: synchronize Bitcoin sync logic in the Stacks node with the new burnchain API --- .../burnchains/bitcoin_regtest_controller.rs | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index efbe561f7a..1c19682243 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -325,10 +325,9 @@ impl 
BitcoinRegtestController { } fn receive_blocks_helium(&mut self) -> BurnchainTip { - let (mut burnchain, mut burnchain_indexer) = self.setup_indexer_runtime(); - let (block_snapshot, state_transition) = loop { - match burnchain.sync_with_indexer_deprecated(&mut burnchain_indexer) { + let (mut burnchain, burnchain_indexer) = self.setup_indexer_runtime(); + match burnchain.sync_with_indexer_deprecated(burnchain_indexer) { Ok(x) => { break x; } @@ -398,13 +397,14 @@ impl BitcoinRegtestController { } }; - let (mut burnchain, mut burnchain_indexer) = self.setup_indexer_runtime(); let (block_snapshot, burnchain_height, state_transition) = loop { if !self.should_keep_running() { return Err(BurnchainControllerError::CoordinatorClosed); } + + let (mut burnchain, burnchain_indexer) = self.setup_indexer_runtime(); match burnchain.sync_with_indexer( - &mut burnchain_indexer, + burnchain_indexer, coordinator_comms.clone(), target_block_height_opt, Some(burnchain.pox_constants.reward_cycle_length as u64), @@ -432,6 +432,7 @@ impl BitcoinRegtestController { .expect("Sortition DB error.") .expect("BUG: no data for the canonical chain tip"); + let (_, burnchain_indexer) = self.setup_indexer_runtime(); let burnchain_height = burnchain_indexer .get_highest_header_height() .map_err(BurnchainControllerError::IndexerError)?; @@ -1065,9 +1066,12 @@ impl BitcoinRegtestController { break; } - let parent = burnchain_db - .get_burnchain_block(&burn_chain_tip.parent_block_hash) - .ok()?; + let parent = BurnchainDB::get_burnchain_block( + &burnchain_db.conn(), + &burn_chain_tip.parent_block_hash, + ) + .ok()?; + burn_chain_tip = parent.header; traversal_depth += 1; } From 64e20a7438ec50a972d25adf2eb41cd684827316 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 01:51:18 -0400 Subject: [PATCH 021/116] refactor: sync neon integration tests with new burnchain API --- testnet/stacks-node/src/tests/neon_integrations.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff 
--git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index dc4859370b..8c6c55bd78 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1084,9 +1084,8 @@ fn bitcoind_resubmission_test() { .unwrap(); let burn_tip = burnchain_db.get_canonical_chain_tip().unwrap(); - let last_burn_block = burnchain_db - .get_burnchain_block(&burn_tip.block_hash) - .unwrap(); + let last_burn_block = + BurnchainDB::get_burnchain_block(burnchain_db.conn(), &burn_tip.block_hash).unwrap(); assert_eq!( last_burn_block.ops.len(), From c77c3a6fb8eb2712bfd3b20c697d95af7416f3ad Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 14 Jun 2021 17:46:23 -0400 Subject: [PATCH 022/116] fix: create the burnchain DB and its parent directory if they don't exist, and have the test framework actually go and connect to the DB (instead of trying to open an existing DB) --- src/burnchains/burnchain.rs | 17 +++++++++++------ src/burnchains/db.rs | 9 ++++++++- src/burnchains/mod.rs | 3 ++- src/core/mod.rs | 1 - 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index 963924512e..4b0e083010 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -525,12 +525,17 @@ impl Burnchain { first_block_height: u64, first_block_hash: &BurnchainHeaderHash, ) -> Burnchain { - let mut ret = Burnchain::new( - &"/unit-tests".to_string(), - &"bitcoin".to_string(), - &"mainnet".to_string(), - ) - .unwrap(); + use rand::rngs::ThreadRng; + use rand::thread_rng; + use rand::RngCore; + + let mut rng = thread_rng(); + let mut byte_tail = [0u8; 16]; + rng.fill_bytes(&mut byte_tail); + + let tmp_path = format!("/tmp/unit-tests-{}", &to_hex(&byte_tail)); + let mut ret = + Burnchain::new(&tmp_path, &"bitcoin".to_string(), &"mainnet".to_string()).unwrap(); ret.first_block_height = first_block_height; 
ret.initial_reward_start_block = first_block_height; ret.first_block_hash = first_block_hash.clone(); diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 128612f0c2..43d72207f7 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -17,7 +17,7 @@ use std::fmt; use std::collections::{HashMap, HashSet}; -use std::{cmp, fs, io}; +use std::{cmp, fs, io, path::Path}; use rusqlite::{ types::ToSql, Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS, @@ -927,6 +927,13 @@ impl BurnchainDB { // need to create if readwrite { create_flag = true; + let ppath = Path::new(path); + let pparent_path = ppath + .parent() + .expect(&format!("BUG: no parent of '{}'", path)); + fs::create_dir_all(&pparent_path) + .map_err(|e| BurnchainError::from(DBError::IOError(e)))?; + OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE } else { return Err(BurnchainError::from(DBError::NoDBError)); diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index a14b72742a..2abe835385 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -1134,7 +1134,8 @@ pub mod test { let blockstack_txs = self.txs.clone(); - let burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); + let burnchain_db = + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), &burnchain, true).unwrap(); let new_snapshot = sortition_db_handle .process_block_txs( diff --git a/src/core/mod.rs b/src/core/mod.rs index cb266404d4..a8519692c9 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -310,4 +310,3 @@ pub const STACKS_EPOCHS_REGTEST: &[StacksEpoch] = &[ end_height: STACKS_EPOCH_MAX, }, ]; - From bfbf56e3d44efbc6a4f5a680ea36b48a785cd579 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 15 Jun 2021 14:35:46 -0400 Subject: [PATCH 023/116] fix: test coverage for forget_orphaned_epoch_data(); clarify behavior of has_stored_block() to include testing the presence of the block's processed bit in the staging_blocks table (and checking 
against the headers DB to see if a block has truly been added to the chainstate when considering a new block) --- src/chainstate/coordinator/tests.rs | 11 ++-- src/chainstate/stacks/db/blocks.rs | 93 +++++++++++++++++++++++++---- 2 files changed, 87 insertions(+), 17 deletions(-) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 73c6c4dc6f..ec8bd4b967 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -3365,14 +3365,11 @@ fn test_pox_affirmation_fork_duel() { let mut fork_start = 0; let mut sortition_fork_start = 0; - // setup: - // sort:1 6 11 16 21 - // |----- rc 0 --------|------- rc 1 --------|----- rc 2 ------------|-------- rc 3 ----------|----- rc 4 - // ix: X - 0 - 1 - 2 - 3 - 4 ------------------- 10 - 11 - 12 - 13 - 14 ------------------------- 20 - 21 - // \________________ 5 _ 6 _ 7 _ 8 _ 9 ________________________ 15 _ 16 _ 17 _ 18 _ 19 + // setup (sortitions block height, with Stacks blocks): + // + // not blind: 2 - 3 - 4 ------------------- 10 - 11 - 12 - 13 - 14 - 15 ------------------------ 21 - 22 - 23 - 24 - 25 + // blind: \________ 5 _ 6 _ 7 _ 8 _ 9 _____________________________ 16 _ 17 _ 18 _ 19 _ 20 ________________________ 26 _ 27 _ 28 _ 29 // - // 2 - 3 - 4 ------------------- 10 - 11 - 12 - 13 - 14 - 15 ------------------------ 21 - 22 - 23 - 24 - 25 - // \________ 5 _ 6 _ 7 _ 8 _ 9 _____________________________ 16 _ 17 _ 18 _ 19 _ 20 ________________________ 26 _ 27 _ 28 _ 29 for (ix, (vrf_key, miner)) in vrf_keys.iter().zip(committers.iter()).enumerate() { let mut burnchain = get_burnchain_db(path, pox_consts.clone()); let burnchain_blind = get_burnchain_db(path_blinded, pox_consts.clone()); diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 8865fe05f5..944d504f86 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -709,9 +709,10 @@ impl StacksChainState { 
StacksChainState::get_staging_block_status(blocks_db, consensus_hash, block_hash)? .map(|processed| !processed); - let index_block_hash = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); match staging_status_opt { Some(staging_status) => { + let index_block_hash = + StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); if staging_status { // not processed yet test_debug!( @@ -728,7 +729,7 @@ impl StacksChainState { } } None => { - // no row in the DB, so not processet at all. + // no row in the DB, so not processed at all. Ok(false) } } @@ -2203,8 +2204,6 @@ impl StacksChainState { consensus_hash, anchored_block_hash, )?; - let _block_path = - StacksChainState::make_block_dir(blocks_path, consensus_hash, anchored_block_hash)?; let rows = query_rows::(tx, &sql, args).map_err(Error::DBError)?; let block = match rows.len() { @@ -4906,7 +4905,13 @@ impl StacksChainState { let block_size = next_staging_block.block_data.len() as u64; // sanity check -- don't process this block again if we already did so - if StacksChainState::has_stored_block( + if StacksChainState::has_stacks_block( + chainstate_tx.tx.deref().deref(), + &StacksBlockHeader::make_index_block_hash( + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, + ), + )? 
|| StacksChainState::has_stored_block( chainstate_tx.tx.deref().deref(), &blocks_path, &next_staging_block.consensus_hash, @@ -6264,7 +6269,9 @@ pub mod test { ) .unwrap(); assert!(fs::metadata(&path).is_ok()); - assert!(StacksChainState::has_stored_block( + + // empty block is considered _not_ stored + assert!(!StacksChainState::has_stored_block( &chainstate.db(), &chainstate.blocks_path, &ConsensusHash([1u8; 20]), @@ -6282,7 +6289,8 @@ pub mod test { #[test] fn stacks_db_block_load_store() { - let chainstate = instantiate_chainstate(false, 0x80000000, "stacks_db_block_load_store"); + let mut chainstate = + instantiate_chainstate(false, 0x80000000, "stacks_db_block_load_store"); let privk = StacksPrivateKey::from_hex( "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", ) @@ -6309,9 +6317,30 @@ pub mod test { ) .unwrap()); - StacksChainState::store_block(&chainstate.blocks_path, &ConsensusHash([1u8; 20]), &block) - .unwrap(); - assert!(fs::metadata(&path).is_ok()); + assert!(!StacksChainState::has_stored_block( + &chainstate.db(), + &chainstate.blocks_path, + &ConsensusHash([1u8; 20]), + &block.block_hash() + ) + .unwrap()); + + store_staging_block( + &mut chainstate, + &ConsensusHash([1u8; 20]), + &block, + &ConsensusHash([2u8; 20]), + 1, + 2, + ); + + set_block_processed( + &mut chainstate, + &ConsensusHash([1u8; 20]), + &block.block_hash(), + true, + ); + assert!(StacksChainState::has_stored_block( &chainstate.db(), &chainstate.blocks_path, @@ -6319,6 +6348,7 @@ pub mod test { &block.block_hash() ) .unwrap()); + assert!(StacksChainState::load_block( &chainstate.blocks_path, &ConsensusHash([1u8; 20]), @@ -6353,6 +6383,7 @@ pub mod test { &block.header, ); + // database determines that it's still there assert!(StacksChainState::has_stored_block( &chainstate.db(), &chainstate.blocks_path, @@ -6360,6 +6391,48 @@ pub mod test { &block.block_hash() ) .unwrap()); + assert!(StacksChainState::load_block( + &chainstate.blocks_path, + &ConsensusHash([1u8; 
20]), + &block.block_hash() + ) + .unwrap() + .is_none()); + + set_block_processed( + &mut chainstate, + &ConsensusHash([1u8; 20]), + &block.block_hash(), + false, + ); + + // still technically stored -- we processed it + assert!(StacksChainState::has_stored_block( + &chainstate.db(), + &chainstate.blocks_path, + &ConsensusHash([1u8; 20]), + &block.block_hash() + ) + .unwrap()); + + let mut dbtx = chainstate.db_tx_begin().unwrap(); + StacksChainState::forget_orphaned_epoch_data( + &mut dbtx, + &ConsensusHash([1u8; 20]), + &block.block_hash(), + ) + .unwrap(); + dbtx.commit().unwrap(); + + // *now* it's not there + assert!(!StacksChainState::has_stored_block( + &chainstate.db(), + &chainstate.blocks_path, + &ConsensusHash([1u8; 20]), + &block.block_hash() + ) + .unwrap()); + assert!(StacksChainState::load_block( &chainstate.blocks_path, &ConsensusHash([1u8; 20]), From 0e2c6c801d15ea4898678de370c90f6c0027dcd1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 18 Jun 2021 17:16:04 -0400 Subject: [PATCH 024/116] fix: remove dead code, and comment on the behavior of affirmation map calculation --- src/burnchains/db.rs | 48 ++++++++------------------------------------ 1 file changed, 8 insertions(+), 40 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 43d72207f7..bc9687c31a 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -476,46 +476,11 @@ impl<'a> BurnchainDBTransaction<'a> { Ok(()) } - /// Update the anchor block descendancy information for the _reward_ phase of a reward cycle. - /// That is, for each block-commit in this reward cycle, mark it as descending from this reward - /// cycle's anchor block (if it exists), or not. If there is no anchor block, then no block in - /// this reward cycle descends from an anchor block. Each reward-phase block-commit's affirmation - /// map is updated by this method. 
- /// Only call after the reward cycle's prepare phase's affirmation maps and descendancy information has been - /// updated. - pub fn update_reward_phase_descendancies( - &self, - indexer: &B, - reward_cycle: u64, - burnchain: &Burnchain, - ) -> Result<(), BurnchainError> { - let first_block_height = burnchain.reward_cycle_to_block_height(reward_cycle); - let last_block_height = burnchain.reward_cycle_to_block_height(reward_cycle + 1) - - (burnchain.pox_constants.prepare_length as u64); - let hdrs = indexer.read_burnchain_headers(first_block_height, last_block_height)?; - let reward_phase_end = - cmp::min(last_block_height, first_block_height + (hdrs.len() as u64)); - - test_debug!( - "Update reward-phase descendancies for reward cycle {} over {} headers between {}-{}", - reward_cycle, - hdrs.len(), - first_block_height, - reward_phase_end - ); - - for block_height in first_block_height..reward_phase_end { - let hdr = &hdrs[(block_height - first_block_height) as usize]; - self.update_block_descendancy(indexer, hdr, burnchain)?; - } - - test_debug!( - "Updated reward-phase descendancies for reward cycle {}", - reward_cycle - ); - Ok(()) - } - + /// Create a prepare-phase affirmation map. This is only done at the very end of a reward + /// cycle, once the anchor block is chosen and a new reward cycle is about to begin. This + /// method updates the prepare-phase block-commit's affirmation map to reflect what its miner + /// believes to be the state of all anchor blocks, _including_ this new reward cycle's anchor + /// block. pub fn make_prepare_phase_affirmation_map( &self, indexer: &B, @@ -697,6 +662,9 @@ impl<'a> BurnchainDBTransaction<'a> { } } + /// Make an affirmation map for a block commit in a reward phase (or an in-progress prepare + /// phase). This is done once per Bitcoin block, as block-commits are stored. Affirmation + /// maps for prepare-phase commits will be recomputed once the reward cycle finishes. 
fn make_reward_phase_affirmation_map( &self, burnchain: &Burnchain, From bf8de600e4c9c4dd7d60407eb464d8fbb2155b4f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 18 Jun 2021 17:16:42 -0400 Subject: [PATCH 025/116] fix: remove commented-out code, and document expected behavior for handling unaffirmed anchor blocks --- src/chainstate/coordinator/mod.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 5b92d1ee08..fc18e5edc6 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -692,7 +692,7 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> // careful -- we might have already procesed sortitions in this // reward cycle with this PoX ID, but that were never confirmed let start_height = last_invalidate_start_block; - let end_height = canonical_burnchain_tip.block_height; // start_height + (self.burnchain.pox_constants.reward_cycle_length as u64); + 1; + let end_height = canonical_burnchain_tip.block_height; for height in start_height..end_height { let snapshots_and_pox_ids = self.get_snapshots_and_pox_ids_at_height(height)?; @@ -1064,10 +1064,14 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> } pub fn get_canonical_affirmation_map(&self) -> Result { + // if we don't have an unaffirmed anchor block, and we're no longer in the initial block + // download, then assume that it's absent. Otherwise, if we are in the initial block + // download but we don't have it yet, assume that it's present. 
BurnchainDB::get_canonical_affirmation_map( self.burnchain_blocks_db.conn(), &self.burnchain, |anchor_block_commit, anchor_block_metadata| { + // TODO: check IBD status (issue #2474) self.has_unaffirmed_pox_anchor_block(anchor_block_commit, anchor_block_metadata) }, ) @@ -1560,6 +1564,8 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> let reward_cycle = self.burnchain.block_height_to_reward_cycle(commit.block_height) .expect("BUG: accepted block commit has a block height before the first reward cycle"); + // TODO: this is probably wrong -- if the anchor block is + // unaffirmed, then this will prevent processing it! if canonical_am .at(reward_cycle) .unwrap_or(AffirmationMapEntry::Nothing) From cb47f44bba7e38f0ba2d136df401ab13c4cafdf7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 18 Jun 2021 17:18:17 -0400 Subject: [PATCH 026/116] fix: remove now-covered TODO --- src/chainstate/coordinator/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index fc18e5edc6..300b72e67e 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -1564,11 +1564,9 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> let reward_cycle = self.burnchain.block_height_to_reward_cycle(commit.block_height) .expect("BUG: accepted block commit has a block height before the first reward cycle"); - // TODO: this is probably wrong -- if the anchor block is - // unaffirmed, then this will prevent processing it! 
if canonical_am .at(reward_cycle) - .unwrap_or(AffirmationMapEntry::Nothing) + .unwrap_or(AffirmationMapEntry::PoxAnchorBlockAbsent) == AffirmationMapEntry::PoxAnchorBlockPresent { // yup, we're expecting this From 993c124f53b4581e6351727bf13c42d018f967b1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Jul 2021 13:05:21 -0400 Subject: [PATCH 027/116] fix: revert to original invalid block-processing code, since this new code doesn't make any real improvement to it (and could litter the block store with lots of empty files) --- src/chainstate/burn/sortition.rs | 2 +- src/chainstate/coordinator/comm.rs | 4 +- src/chainstate/coordinator/tests.rs | 2 +- src/chainstate/stacks/db/blocks.rs | 72 ++++++++++++++++------------- src/chainstate/stacks/miner.rs | 2 +- 5 files changed, 44 insertions(+), 38 deletions(-) diff --git a/src/chainstate/burn/sortition.rs b/src/chainstate/burn/sortition.rs index 462a1a5582..0a11a8edde 100644 --- a/src/chainstate/burn/sortition.rs +++ b/src/chainstate/burn/sortition.rs @@ -409,7 +409,7 @@ impl BlockSnapshot { #[cfg(test)] mod test { use address::*; - use burnchains::test::*; + use burnchains::tests::*; use burnchains::*; use chainstate::burn::db::sortdb::*; use chainstate::burn::operations::*; diff --git a/src/chainstate/coordinator/comm.rs b/src/chainstate/coordinator/comm.rs index 1439330494..e095b14ca8 100644 --- a/src/chainstate/coordinator/comm.rs +++ b/src/chainstate/coordinator/comm.rs @@ -167,7 +167,7 @@ impl CoordinatorChannels { return false; } thread::sleep(Duration::from_millis(100)); - std::sync::atomic::spin_loop_hint(); + std::hint::spin_loop(); } return true; } @@ -179,7 +179,7 @@ impl CoordinatorChannels { return false; } thread::sleep(Duration::from_millis(100)); - std::sync::atomic::spin_loop_hint(); + std::hint::spin_loop(); } return true; } diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index ec8bd4b967..d74e8ce4f4 100644 --- a/src/chainstate/coordinator/tests.rs +++ 
b/src/chainstate/coordinator/tests.rs @@ -31,7 +31,7 @@ use burnchains::affirmation::*; use burnchains::bitcoin::address::BitcoinAddress; use burnchains::bitcoin::indexer::BitcoinIndexer; use burnchains::bitcoin::BitcoinNetworkType; -use burnchains::db::tests::*; +use burnchains::tests::db::*; use burnchains::{db::*, *}; use chainstate; use chainstate::burn::db::sortdb::SortitionDB; diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 944d504f86..ad626c4cff 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -777,43 +777,49 @@ impl StacksChainState { StacksChainState::make_block_dir(blocks_dir, consensus_hash, &block_header_hash) .expect("FATAL: failed to create block directory"); - // try make this thread-safe. It's okay if this block gets copied more than once; we - // only care that at least one copy survives for further analysis. - let random_bytes = thread_rng().gen::<[u8; 8]>(); - let random_bytes_str = to_hex(&random_bytes); - let index_block_hash = StacksBlockId::new(consensus_hash, block_header_hash); - let mut invalid_path = - StacksChainState::get_index_block_pathbuf(blocks_dir, &index_block_hash); - invalid_path - .file_name() - .expect("FATAL: index block path did not have file name"); - invalid_path.set_extension(&format!("invalid-{}", &random_bytes_str)); - - fs::copy(&block_path, &invalid_path).expect(&format!( - "FATAL: failed to copy '{}' to '{}'", - &block_path, - &invalid_path.to_string_lossy(), - )); - - // already freed? - let sz = fs::metadata(&invalid_path) - .expect(&format!( - "FATAL: failed to stat '{}'", - &invalid_path.to_string_lossy() - )) + let sz = fs::metadata(&block_path) + .expect(&format!("FATAL: failed to stat '{}'", &block_path)) .len(); if sz > 0 { - // truncate the original - fs::OpenOptions::new() - .read(false) - .write(true) - .truncate(true) - .open(&block_path) + // try make this thread-safe. 
It's okay if this block gets copied more than once; we + // only care that at least one copy survives for further analysis. + let random_bytes = thread_rng().gen::<[u8; 8]>(); + let random_bytes_str = to_hex(&random_bytes); + let index_block_hash = StacksBlockId::new(consensus_hash, block_header_hash); + let mut invalid_path = + StacksChainState::get_index_block_pathbuf(blocks_dir, &index_block_hash); + invalid_path + .file_name() + .expect("FATAL: index block path did not have file name"); + invalid_path.set_extension(&format!("invalid-{}", &random_bytes_str)); + + fs::copy(&block_path, &invalid_path).expect(&format!( + "FATAL: failed to copy '{}' to '{}'", + &block_path, + &invalid_path.to_string_lossy(), + )); + + // already freed? + let sz = fs::metadata(&invalid_path) .expect(&format!( - "FATAL: Failed to mark block path '{}' as free", - &block_path - )); + "FATAL: failed to stat '{}'", + &invalid_path.to_string_lossy() + )) + .len(); + + if sz > 0 { + // truncate the original + fs::OpenOptions::new() + .read(false) + .write(true) + .truncate(true) + .open(&block_path) + .expect(&format!( + "FATAL: Failed to mark block path '{}' as free", + &block_path + )); + } } } diff --git a/src/chainstate/stacks/miner.rs b/src/chainstate/stacks/miner.rs index c8a5bc51f2..dda4aa962f 100644 --- a/src/chainstate/stacks/miner.rs +++ b/src/chainstate/stacks/miner.rs @@ -1536,7 +1536,7 @@ pub mod test { use rand::Rng; use address::*; - use burnchains::test::*; + use burnchains::tests::*; use burnchains::*; use chainstate::burn::db::sortdb::*; use chainstate::burn::operations::{ From 5bc69e50c5d7a214d11454d1fae8599478beec72 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Jul 2021 13:06:29 -0400 Subject: [PATCH 028/116] refactor/fix: put all burnchain tests into their own test directory, since they have grown quite big. Also, fix a bug in find_heaviest_block_commit() so we don't undercount confirmations and BTC burnt (and update the tests to check this!) 
--- src/burnchains/affirmation.rs | 2565 +-------------------------- src/burnchains/burnchain.rs | 1146 +----------- src/burnchains/db.rs | 654 +------ src/burnchains/mod.rs | 1130 +----------- src/burnchains/tests/affirmation.rs | 2387 +++++++++++++++++++++++++ src/burnchains/tests/burnchain.rs | 1121 ++++++++++++ src/burnchains/tests/db.rs | 591 ++++++ src/burnchains/tests/mod.rs | 1128 ++++++++++++ src/net/mod.rs | 2 +- 9 files changed, 5332 insertions(+), 5392 deletions(-) create mode 100644 src/burnchains/tests/affirmation.rs create mode 100644 src/burnchains/tests/burnchain.rs create mode 100644 src/burnchains/tests/db.rs create mode 100644 src/burnchains/tests/mod.rs diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index 5a69adfb68..1842d0d222 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -18,6 +18,7 @@ use std::cmp; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::convert::{TryFrom, TryInto}; use std::fmt; +use std::fmt::Write; use std::sync::mpsc::SyncSender; use std::time::Duration; @@ -55,23 +56,13 @@ pub enum AffirmationMapEntry { } impl AffirmationMapEntry { - pub fn parse(s: &str) -> Option { - if s.len() != 1 { - return None; + pub fn from_chr(c: char) -> Option { + match c { + 'p' => Some(AffirmationMapEntry::PoxAnchorBlockPresent), + 'a' => Some(AffirmationMapEntry::PoxAnchorBlockAbsent), + 'n' => Some(AffirmationMapEntry::Nothing), + _ => None, } - - for chr in s.chars() { - let next = match chr { - 'p' => AffirmationMapEntry::PoxAnchorBlockPresent, - 'a' => AffirmationMapEntry::PoxAnchorBlockAbsent, - 'n' => AffirmationMapEntry::Nothing, - _ => { - return None; - } - }; - return Some(next); - } - return None; } } @@ -98,17 +89,16 @@ impl fmt::Debug for AffirmationMapEntry { impl fmt::Display for AffirmationMap { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "AM(")?; for entry in self.affirmations.iter() { write!(f, "{}", &entry)?; } - 
write!(f, ")") + Ok(()) } } impl fmt::Debug for AffirmationMap { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&format!("{}", &self)) + write!(f, "{}", self) } } @@ -126,11 +116,7 @@ impl AffirmationMap { } pub fn at(&self, reward_cycle: u64) -> Option { - if reward_cycle >= self.affirmations.len() as u64 { - None - } else { - Some(self.affirmations[reward_cycle as usize]) - } + self.affirmations.get(reward_cycle as usize).cloned() } pub fn push(&mut self, entry: AffirmationMapEntry) { @@ -145,21 +131,15 @@ impl AffirmationMap { self.affirmations.len() } - pub fn reward_cycle(&self) -> u64 { - self.affirmations.len() as u64 - } - pub fn as_slice(&self) -> &[AffirmationMapEntry] { &self.affirmations } // used to store to database pub fn encode(&self) -> String { - let mut ret = vec![]; - for entry in self.affirmations.iter() { - ret.push(format!("{}", entry)); - } - ret.join("") + let mut ret = String::with_capacity(self.affirmations.len()); + write!(&mut ret, "{}", self).expect("BUG: failed to serialize affirmations -- likely OOM"); + ret } // used for database from-row @@ -168,17 +148,13 @@ impl AffirmationMap { return None; } - let mut affirmations = vec![]; + let mut affirmations = Vec::with_capacity(s.len()); for chr in s.chars() { - let next = match chr { - 'p' => AffirmationMapEntry::PoxAnchorBlockPresent, - 'a' => AffirmationMapEntry::PoxAnchorBlockAbsent, - 'n' => AffirmationMapEntry::Nothing, - _ => { - return None; - } - }; - affirmations.push(next); + if let Some(next) = AffirmationMapEntry::from_chr(chr) { + affirmations.push(next); + } else { + return None; + } } Some(AffirmationMap { affirmations }) } @@ -509,12 +485,15 @@ pub fn filter_missed_block_commits( /// the block-commit that has the most cumulative BTC committed behind it (and the highest /// such in the event of a tie), as well as at least `anchor_threshold` confirmations. If the anchor block /// commit is found, return the descendancy matrix for it as well. 
+/// Returns Some(the winning block commit, descendancy matrix, total confirmations, total burnt) if +/// there's an anchor block commit. +/// Returns None otherwise pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( burnchain_tx: &BurnchainDBTransaction<'a>, indexer: &B, prepare_ops: &Vec>, anchor_threshold: u32, -) -> Result>)>, DBError> { +) -> Result>, u64, u64)>, DBError> { // sanity check -- must be in order by block height and vtxindex for prepare_block_ops in prepare_ops.iter() { let mut expected_block_height = None; @@ -541,10 +520,10 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( } } - // map (block_height, vtxindex) to (burnt, parent_block_height, parent_vtxindex) + // map (block_height, vtxindex) to (parent_block_height, parent_vtxindex) let mut parents = BTreeMap::new(); - // map (block_height, vtxindex) to (non-prepare-ancestor-height, non-prepare-ancestor-vtxindex, total_burnt) + // map (block_height, vtxindex) to (non-prepare-ancestor-height, non-prepare-ancestor-vtxindex) let mut ancestors = BTreeMap::new(); // map (non-prepare-ancestor-height, non-prepare-ancestor-vtxindex) to (set-of-block-heights, total_burnt) @@ -557,7 +536,6 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( parents.insert( (opdata.block_height, opdata.vtxindex), ( - opdata.burn_fee, opdata.parent_block_ptr as u64, opdata.parent_vtxindex as u32, ), @@ -565,26 +543,14 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( } } - // calculate the ancestor map -- find the highest ancestor for each prepare-phase block-commit - // that is _not_ in the prepare phase. + // calculate the ancestor map -- find the highest non-prepare-phase ancestor for each prepare-phase block-commit. 
for prepare_block_ops in prepare_ops.iter().rev() { for opdata in prepare_block_ops.iter() { let mut cursor = (opdata.block_height, opdata.vtxindex); - let mut total_burnt = 0; - while !ancestors.contains_key(&cursor) { - if let Some((burnt, parent_block, parent_vtxindex)) = parents.get(&cursor) { - cursor = (*parent_block, *parent_vtxindex); - total_burnt += *burnt; - } else { - break; - } - } - if !ancestors.contains_key(&cursor) { - ancestors.insert( - (opdata.block_height, opdata.vtxindex), - (cursor.0, cursor.1, total_burnt), - ); + while let Some((parent_block, parent_vtxindex)) = parents.get(&cursor) { + cursor = (*parent_block, *parent_vtxindex); } + ancestors.insert((opdata.block_height, opdata.vtxindex), (cursor.0, cursor.1)); } } @@ -592,20 +558,20 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( // block-commits that descend from each pre-prepare-phase ancestor for prepare_block_ops in prepare_ops.iter() { for opdata in prepare_block_ops.iter() { - if let Some((ancestor_height, ancestor_vtxindex, total_burnt)) = + if let Some((ancestor_height, ancestor_vtxindex)) = ancestors.get(&(opdata.block_height, opdata.vtxindex)) { if let Some((ref mut confirmed_block_set, ref mut ancestor_burnt)) = ancestor_confirmations.get_mut(&(*ancestor_height, *ancestor_vtxindex)) { confirmed_block_set.insert(opdata.block_height); - *ancestor_burnt = cmp::max(*total_burnt, *ancestor_burnt); + *ancestor_burnt += opdata.burn_fee; } else { let mut block_set = HashSet::new(); block_set.insert(opdata.block_height); ancestor_confirmations.insert( (*ancestor_height, *ancestor_vtxindex), - (block_set, *total_burnt), + (block_set, opdata.burn_fee), ); } } @@ -635,6 +601,8 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( if confs < anchor_threshold.into() { continue; } + + // only consider an earlier ancestor if it burned more than the candidate if *burnt > most_burnt { most_burnt = *burnt; most_confs = confs; @@ -678,9 +646,18 @@ pub fn 
find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( &opdata.txid, opdata.block_height, opdata.vtxindex, most_burnt, most_confs ); + // sanity check -- there should be exactly as many confirmations on the suspected + // anchor block as there are distinct descendancies. + let mut conf_count = 0; + + // sanity check -- there should be exactly as many BTC burnt for the suspected + // anchor block as the most_burnt. + let mut burn_count = 0; + let mut descendancy = Vec::with_capacity(prepare_ops.len()); for prepare_block_ops in prepare_ops.iter() { let mut block_descendancy = Vec::with_capacity(prepare_ops.len()); + let mut found_conf = false; for opdata in prepare_block_ops.iter() { if let Some((op_ancestor_height, op_ancestor_vtxindex, ..)) = ancestors.get(&(opdata.block_height, opdata.vtxindex)) @@ -688,21 +665,29 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( if *op_ancestor_height == ancestor_block && *op_ancestor_vtxindex == ancestor_vtxindex { - test_debug!("Block-commit {},{} descends from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, op_ancestor_height, op_ancestor_vtxindex); + debug!("Block-commit {},{} descends from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, op_ancestor_height, op_ancestor_vtxindex); block_descendancy.push(true); + if !found_conf { + conf_count += 1; + found_conf = true; + } + burn_count += opdata.burn_fee; } else { - test_debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); + debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); block_descendancy.push(false); } } else { - test_debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); + debug!("Block-commit {},{} does NOT 
descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); block_descendancy.push(false); } } descendancy.push(block_descendancy); } - return Ok(Some((opdata, descendancy))); + assert_eq!(conf_count, most_confs); + assert_eq!(burn_count, most_burnt); + + return Ok(Some((opdata, descendancy, most_confs, most_burnt))); } } } @@ -766,7 +751,7 @@ pub fn find_pox_anchor_block<'a, B: BurnchainHeaderReader>( &prepare_ops_valid, burnchain.pox_constants.anchor_threshold, )?; - if let Some((ref anchor_block_commit, _)) = anchor_block_and_descendancy_opt.as_ref() { + if let Some((ref anchor_block_commit, ..)) = anchor_block_and_descendancy_opt.as_ref() { // cannot have been an anchor block in some other reward cycle let md = BurnchainDB::get_commit_metadata( burnchain_tx.conn(), @@ -796,7 +781,11 @@ pub fn find_pox_anchor_block<'a, B: BurnchainHeaderReader>( ); } - Ok((prepare_ops_valid, anchor_block_and_descendancy_opt)) + Ok(( + prepare_ops_valid, + anchor_block_and_descendancy_opt + .map(|(anchor_block_commit, descendancy, ..)| (anchor_block_commit, descendancy)), + )) } /// Update a completed reward cycle's affirmation maps @@ -881,2435 +870,3 @@ pub fn update_pox_affirmation_maps( Ok(()) } - -#[cfg(test)] -mod test { - use super::*; - use std::cmp; - use std::collections::HashSet; - use std::collections::VecDeque; - use std::sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - mpsc::sync_channel, - Arc, RwLock, - }; - - use rusqlite::Connection; - - use address; - use burnchains::bitcoin::address::BitcoinAddress; - use burnchains::bitcoin::indexer::BitcoinIndexer; - use burnchains::bitcoin::BitcoinNetworkType; - use burnchains::db::tests::*; - use burnchains::{db::*, *}; - use chainstate; - use chainstate::burn::db::sortdb::SortitionDB; - use chainstate::burn::operations::leader_block_commit::*; - use chainstate::burn::operations::*; - use chainstate::burn::*; - use chainstate::coordinator::{Error as 
CoordError, *}; - use chainstate::stacks::*; - use clarity_vm::clarity::ClarityConnection; - use core; - use core::*; - use monitoring::increment_stx_blocks_processed_counter; - use util::hash::{hex_bytes, Hash160}; - use util::vrf::*; - use vm::{ - costs::{ExecutionCost, LimitedCostTracker}, - types::PrincipalData, - types::QualifiedContractIdentifier, - Value, - }; - - use crate::types::chainstate::StacksBlockId; - use crate::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, VRFSeed, - }; - use crate::types::proof::TrieHash; - use crate::{types, util}; - - use chainstate::coordinator::tests::*; - - #[test] - fn affirmation_map_encode_decode() { - assert_eq!(AffirmationMap::decode(""), Some(AffirmationMap::empty())); - assert_eq!( - AffirmationMap::decode("anp"), - Some(AffirmationMap { - affirmations: vec![ - AffirmationMapEntry::PoxAnchorBlockAbsent, - AffirmationMapEntry::Nothing, - AffirmationMapEntry::PoxAnchorBlockPresent - ] - }) - ); - assert_eq!(AffirmationMap::decode("x"), None); - - assert_eq!(AffirmationMap::empty().encode(), "".to_string()); - assert_eq!( - AffirmationMap { - affirmations: vec![ - AffirmationMapEntry::PoxAnchorBlockAbsent, - AffirmationMapEntry::Nothing, - AffirmationMapEntry::PoxAnchorBlockPresent - ] - } - .encode(), - "anp".to_string() - ); - } - - #[test] - fn affirmation_map_find_divergence() { - assert_eq!( - AffirmationMap::decode("aaa") - .unwrap() - .find_divergence(&AffirmationMap::decode("aaa").unwrap()), - None - ); - assert_eq!( - AffirmationMap::decode("aaa") - .unwrap() - .find_divergence(&AffirmationMap::decode("aaaa").unwrap()), - Some(3) - ); - assert_eq!( - AffirmationMap::decode("aaa") - .unwrap() - .find_divergence(&AffirmationMap::decode("aa").unwrap()), - None - ); - assert_eq!( - AffirmationMap::decode("apa") - .unwrap() - .find_divergence(&AffirmationMap::decode("aaa").unwrap()), - Some(1) - ); - assert_eq!( - AffirmationMap::decode("apa") - .unwrap() - 
.find_divergence(&AffirmationMap::decode("aaaa").unwrap()), - Some(1) - ); - assert_eq!( - AffirmationMap::decode("naa") - .unwrap() - .find_divergence(&AffirmationMap::decode("aa").unwrap()), - Some(0) - ); - assert_eq!( - AffirmationMap::decode("napn") - .unwrap() - .find_divergence(&AffirmationMap::decode("").unwrap()), - None - ); - assert_eq!( - AffirmationMap::decode("pn") - .unwrap() - .find_divergence(&AffirmationMap::decode("n").unwrap()), - Some(0) - ); - } - - fn make_simple_key_register( - burn_header_hash: &BurnchainHeaderHash, - block_height: u64, - vtxindex: u32, - ) -> LeaderKeyRegisterOp { - LeaderKeyRegisterOp { - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("2222222222222222222222222222222222222222").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a") - .unwrap(), - ) - .unwrap(), - memo: vec![01, 02, 03, 04, 05], - address: StacksAddress::from_bitcoin_address( - &BitcoinAddress::from_scriptpubkey( - BitcoinNetworkType::Testnet, - &hex_bytes("76a9140be3e286a15ea85882761618e366586b5574100d88ac").unwrap(), - ) - .unwrap(), - ), - - txid: next_txid(), - vtxindex: vtxindex, - block_height: block_height, - burn_header_hash: burn_header_hash.clone(), - } - } - - pub fn make_reward_cycle_with_vote( - burnchain_db: &mut BurnchainDB, - burnchain: &Burnchain, - key: &LeaderKeyRegisterOp, - headers: &mut Vec, - mut parent_commits: Vec>, - confirm_anchor_block: bool, - ) -> ( - Vec, - Vec>>, - ) { - let mut new_headers = vec![]; - let mut new_commits = vec![]; - - let first_block_header = burnchain_db.get_first_header().unwrap(); - let mut current_header = burnchain_db.get_canonical_chain_tip().unwrap(); - let mut height = current_header.block_height + 1; - let mut parent_block_header: Option = - Some(headers.last().unwrap().to_owned()); - - for i in 0..burnchain.pox_constants.reward_cycle_length { - let block_header = BurnchainBlockHeader { - 
block_height: height, - block_hash: next_burn_header_hash(), - parent_block_hash: parent_block_header - .as_ref() - .map(|blk| blk.block_hash.clone()) - .unwrap_or(first_block_header.block_hash.clone()), - num_txs: parent_commits.len() as u64, - timestamp: i as u64, - }; - - let ops = if current_header == first_block_header { - // first-ever block -- add only the leader key - let mut key_insert = key.clone(); - key_insert.burn_header_hash = block_header.block_hash.clone(); - - test_debug!( - "Insert key-register in {}: {},{},{} in block {}", - &key_insert.burn_header_hash, - &key_insert.txid, - key_insert.block_height, - key_insert.vtxindex, - block_header.block_height - ); - - new_commits.push(vec![None; parent_commits.len()]); - vec![BlockstackOperationType::LeaderKeyRegister( - key_insert.clone(), - )] - } else { - let mut commits = vec![]; - for i in 0..parent_commits.len() { - let mut block_commit = make_simple_block_commit( - &burnchain, - parent_commits[i].as_ref(), - &block_header, - next_block_hash(), - ); - block_commit.key_block_ptr = key.block_height as u32; - block_commit.key_vtxindex = key.vtxindex as u16; - block_commit.vtxindex += i as u32; - block_commit.burn_parent_modulus = if height > 0 { - ((height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8 - } else { - BURN_BLOCK_MINED_AT_MODULUS as u8 - 1 - }; - - assert_eq!(block_commit.burn_header_hash, block_header.block_hash); - assert_eq!(block_commit.block_height, block_header.block_height); - - let append = if !burnchain.is_in_prepare_phase(block_commit.block_height) { - // non-prepare-phase commits always confirm their parent - true - } else { - if confirm_anchor_block { - // all block-commits confirm anchor block - true - } else { - // fewer than anchor_threshold commits confirm anchor block - let next_rc_start = burnchain.reward_cycle_to_block_height( - burnchain - .block_height_to_reward_cycle(block_commit.block_height) - .unwrap() - + 1, - ); - if block_commit.block_height - + 
(burnchain.pox_constants.anchor_threshold as u64) - + 1 - < next_rc_start - { - // in first half of prepare phase, so confirm - true - } else { - // in second half of prepare phase, so don't confirm - false - } - } - }; - - if append { - test_debug!( - "Insert block-commit in {}: {},{},{}, builds on {},{}", - &block_commit.burn_header_hash, - &block_commit.txid, - block_commit.block_height, - block_commit.vtxindex, - block_commit.parent_block_ptr, - block_commit.parent_vtxindex - ); - - if let Some(ref parent_commit) = parent_commits[i].as_ref() { - assert!( - parent_commit.block_height as u64 - != block_commit.block_height as u64 - ); - assert!( - parent_commit.block_height as u64 - == block_commit.parent_block_ptr as u64 - ); - assert!( - parent_commit.vtxindex as u64 - == block_commit.parent_vtxindex as u64 - ); - } - - parent_commits[i] = Some(block_commit.clone()); - commits.push(Some(block_commit.clone())); - } else { - test_debug!( - "Do NOT insert block-commit in {}: {},{},{}", - &block_commit.burn_header_hash, - &block_commit.txid, - block_commit.block_height, - block_commit.vtxindex - ); - - commits.push(None); - } - } - new_commits.push(commits.clone()); - commits - .into_iter() - .filter_map(|cmt| cmt) - .map(|cmt| BlockstackOperationType::LeaderBlockCommit(cmt)) - .collect() - }; - - burnchain_db - .store_new_burnchain_block_ops_unchecked(burnchain, headers, &block_header, &ops) - .unwrap(); - - headers.push(block_header.clone()); - new_headers.push(block_header.clone()); - parent_block_header = Some(block_header); - - current_header = burnchain_db.get_canonical_chain_tip().unwrap(); - height = current_header.block_height + 1; - } - - (new_headers, new_commits) - } - - fn make_simple_reward_cycle( - burnchain_db: &mut BurnchainDB, - burnchain: &Burnchain, - key: &LeaderKeyRegisterOp, - headers: &mut Vec, - parent_commit: Option, - ) -> (Vec, Vec>) { - let (new_headers, commits) = - make_reward_cycle(burnchain_db, burnchain, key, headers, 
vec![parent_commit]); - ( - new_headers, - commits - .into_iter() - .map(|mut cmts| cmts.pop().unwrap()) - .collect(), - ) - } - - pub fn make_reward_cycle( - burnchain_db: &mut BurnchainDB, - burnchain: &Burnchain, - key: &LeaderKeyRegisterOp, - headers: &mut Vec, - parent_commits: Vec>, - ) -> ( - Vec, - Vec>>, - ) { - make_reward_cycle_with_vote(burnchain_db, burnchain, key, headers, parent_commits, true) - } - - pub fn make_reward_cycle_without_anchor( - burnchain_db: &mut BurnchainDB, - burnchain: &Burnchain, - key: &LeaderKeyRegisterOp, - headers: &mut Vec, - parent_commits: Vec>, - ) -> ( - Vec, - Vec>>, - ) { - make_reward_cycle_with_vote(burnchain_db, burnchain, key, headers, parent_commits, false) - } - - #[test] - fn test_read_prepare_phase_commits() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - assert_eq!(&first_block_header.block_hash, &first_bhh); - assert_eq!(first_block_header.block_height, first_height); - assert_eq!(first_block_header.timestamp, first_timestamp as u64); - /* - assert_eq!( - &first_block_header.parent_block_hash, - &BurnchainHeaderHash::sentinel() - ); - */ - eprintln!( - "First block parent is {}", - &first_block_header.parent_block_hash - ); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - let (next_headers, commits) = make_simple_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - None, - ); - - assert_eq!( - 
commits.len() as u32, - burnchain.pox_constants.reward_cycle_length - ); - assert!(commits[0].is_none()); - for i in 1..burnchain.pox_constants.reward_cycle_length { - assert!(commits[i as usize].is_some()); - } - - let all_ops = read_prepare_phase_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &burnchain.pox_constants, - first_block_header.block_height, - 0, - ) - .unwrap(); - assert_eq!(all_ops.len() as u32, burnchain.pox_constants.prepare_length); - for i in 0..burnchain.pox_constants.prepare_length { - assert_eq!(all_ops[i as usize].len(), 1); - - let opdata = &all_ops[i as usize][0]; - assert_eq!( - opdata, - commits[(i + burnchain.pox_constants.reward_cycle_length - - burnchain.pox_constants.prepare_length) as usize] - .as_ref() - .unwrap() - ); - } - } - - #[test] - fn test_parent_block_commits() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits) = make_simple_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - None, - ); - - let all_ops = read_prepare_phase_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &burnchain.pox_constants, - first_block_header.block_height, - 0, - ) - .unwrap(); - let parent_commits = - 
read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) - .unwrap(); - - // this is a simple reward cycle -- each block-commit has a unique parent - assert_eq!(parent_commits.len(), all_ops.len()); - - for op_list in all_ops.iter() { - for opdata in op_list.iter() { - let mut found_parent = false; - for parent_commit in parent_commits.iter() { - if parent_commit.block_height == (opdata.parent_block_ptr as u64) - && parent_commit.vtxindex == (opdata.parent_vtxindex as u32) - { - found_parent = true; - break; - } - } - assert!(found_parent, "did not find parent for {:?}", opdata); - } - } - - let mut all_ops_with_orphan = all_ops.clone(); - all_ops_with_orphan[1][0].parent_vtxindex += 1; - - let parent_commits = read_parent_block_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_with_orphan, - ) - .unwrap(); - - // this is a simple reward cycle -- each block-commit has a unique parent, except for the - // orphan - assert_eq!(parent_commits.len(), all_ops_with_orphan.len() - 1); - - let mut all_ops_with_same_parent = all_ops.clone(); - for ops in all_ops_with_same_parent.iter_mut() { - for opdata in ops.iter_mut() { - opdata.parent_block_ptr = 3; - opdata.parent_vtxindex = 0; - } - } - - let parent_commits = read_parent_block_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_with_same_parent, - ) - .unwrap(); - - assert_eq!(parent_commits.len(), 1); - assert_eq!(parent_commits[0].block_height, 3); - assert_eq!(parent_commits[0].vtxindex, 0); - } - - #[test] - fn test_filter_orphan_block_commits() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = 
BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits) = make_simple_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - None, - ); - - let all_ops = read_prepare_phase_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &burnchain.pox_constants, - first_block_header.block_height, - 0, - ) - .unwrap(); - let parent_commits = - read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) - .unwrap(); - - let mut all_ops_with_orphan = all_ops.clone(); - all_ops_with_orphan[1][0].parent_vtxindex += 1; - - assert_eq!(all_ops_with_orphan[0].len(), 1); - assert_eq!(all_ops_with_orphan[1].len(), 1); - assert_eq!(all_ops_with_orphan[2].len(), 1); - - let parent_commits = read_parent_block_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_with_orphan, - ) - .unwrap(); - let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops_with_orphan); - - assert_eq!(filtered_ops.len(), all_ops.len()); - assert_eq!(filtered_ops[0].len(), 1); - assert_eq!(filtered_ops[1].len(), 0); - assert_eq!(filtered_ops[2].len(), 1); - } - - #[test] - fn test_filter_missed_block_commits() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = 
burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits) = make_simple_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - None, - ); - - let all_ops = read_prepare_phase_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &burnchain.pox_constants, - first_block_header.block_height, - 0, - ) - .unwrap(); - let parent_commits = - read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) - .unwrap(); - - let mut all_ops_with_missed = all_ops.clone(); - all_ops_with_missed[1][0].burn_parent_modulus -= 1; - - assert_eq!(all_ops_with_missed[0].len(), 1); - assert_eq!(all_ops_with_missed[1].len(), 1); - assert_eq!(all_ops_with_missed[2].len(), 1); - - let parent_commits = read_parent_block_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_with_missed, - ) - .unwrap(); - let filtered_ops = filter_missed_block_commits(all_ops_with_missed); - - assert_eq!(filtered_ops.len(), all_ops.len()); - assert_eq!(filtered_ops[0].len(), 1); - assert_eq!(filtered_ops[1].len(), 0); - assert_eq!(filtered_ops[2].len(), 1); - } - - #[test] - fn test_find_heaviest_block_commit() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; 
- let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits) = make_simple_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - None, - ); - - let all_ops = read_prepare_phase_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &burnchain.pox_constants, - first_block_header.block_height, - 0, - ) - .unwrap(); - let parent_commits = - read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) - .unwrap(); - let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops); - - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &filtered_ops, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_some()); - let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); - - // since this is just a linear chain of block-commits, the heaviest parent is the parent of the - // first block-commit in the prepare phase - assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); - assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); - - // make a forked history, but with a best-tip - // 1,0 <-- 2,0 <-- 3,0 <-- 4,0 - // \ - // `---------------------------- 5,0 - let mut all_ops_forked_majority = filtered_ops.clone(); - all_ops_forked_majority[2][0].parent_block_ptr = 1; - all_ops_forked_majority[2][0].parent_vtxindex = 0; - - // still commit 1 - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_forked_majority, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_some()); - let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); - - 
assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); - assert_eq!(descendancy, vec![vec![true], vec![true], vec![false]]); - - // make a forked history, with another best-tip winner, but with a deeper fork split - // 1,0 <-- 2,0 <-- 3,0 - // \ - // `------- 4,0 <-- 5,0 - let mut all_ops_forked_majority = filtered_ops.clone(); - all_ops_forked_majority[1][0].parent_block_ptr = 2; - all_ops_forked_majority[1][0].parent_vtxindex = 0; - - all_ops_forked_majority[2][0].parent_block_ptr = 2; - all_ops_forked_majority[2][0].parent_vtxindex = 0; - - // still commit 1 - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_forked_majority, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_some()); - let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); - - assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); - assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); - - // make a forked history where there is no best tip, but enough confirmations - // 1,0 <-- 2,0 <-- 3,0 - // |\ - // | `------- 4,0 - // \ - // `------------- 5,0 - let mut all_ops_no_majority = filtered_ops.clone(); - all_ops_no_majority[0][0].parent_block_ptr = 2; - all_ops_no_majority[0][0].parent_vtxindex = 0; - all_ops_no_majority[0][0].burn_fee = 0; - - all_ops_no_majority[1][0].parent_block_ptr = 2; - all_ops_no_majority[1][0].parent_vtxindex = 0; - all_ops_no_majority[1][0].burn_fee = 1; - - all_ops_no_majority[2][0].parent_block_ptr = 2; - all_ops_no_majority[2][0].parent_vtxindex = 0; - all_ops_no_majority[2][0].burn_fee = 2; - - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_no_majority, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_some()); - let 
(heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); - - assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); - assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); - - // make a forked history where there is no best tip, but enough (majority) confirmations - // 1,0 <-- 2,0 <-- 3,0 - // | \ - // | `-------- 4,0 - // | - // `----------------------- 5,0 - let mut all_ops_no_majority = filtered_ops.clone(); - all_ops_no_majority[0][0].parent_block_ptr = 2; - all_ops_no_majority[0][0].parent_vtxindex = 0; - all_ops_no_majority[0][0].burn_fee = 0; - - all_ops_no_majority[1][0].parent_block_ptr = 2; - all_ops_no_majority[1][0].parent_vtxindex = 0; - all_ops_no_majority[1][0].burn_fee = 1; - - all_ops_no_majority[2][0].parent_block_ptr = 1; - all_ops_no_majority[2][0].parent_vtxindex = 0; - all_ops_no_majority[2][0].burn_fee = 20; - - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_no_majority, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_some()); - let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); - - assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); - assert_eq!(descendancy, vec![vec![true], vec![true], vec![false]]); - - // make a history where there is no anchor block, period - // 1,0 <-- 2,0 X-- 3,0 - // - // X------- 4,0 - // - // X------------ 5,0 - let mut all_ops_no_majority = filtered_ops.clone(); - all_ops_no_majority[0][0].parent_block_ptr = 2; - all_ops_no_majority[0][0].parent_vtxindex = 10; - all_ops_no_majority[0][0].burn_fee = 0; - - all_ops_no_majority[1][0].parent_block_ptr = 2; - all_ops_no_majority[1][0].parent_vtxindex = 10; - all_ops_no_majority[1][0].burn_fee = 1; - - all_ops_no_majority[2][0].parent_block_ptr = 1; - all_ops_no_majority[2][0].parent_vtxindex = 10; - all_ops_no_majority[2][0].burn_fee 
= 20; - - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_no_majority, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_none()); - } - - #[test] - fn test_find_heaviest_parent_commit_many_commits() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - let (next_headers, commits) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![None, None], - ); - - let all_ops = read_prepare_phase_commits( - &burnchain_db.tx_begin().unwrap(), - &headers, - &burnchain.pox_constants, - first_block_header.block_height, - 0, - ) - .unwrap(); - let parent_commits = - read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops) - .unwrap(); - let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops); - - // make a history with two miners' commits. 
- // sortition winners in prepare phase were 3,0; 4,1; 5,0 - // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 ,--- 5,0 - // \ \ / - // `---- 3,1 `--- 4,1 <--- 5,1 - let mut all_ops_no_majority = filtered_ops.clone(); - - // 3,0 - all_ops_no_majority[0][0].parent_block_ptr = 2; - all_ops_no_majority[0][0].parent_vtxindex = 0; - all_ops_no_majority[0][0].vtxindex = 0; - all_ops_no_majority[0][0].burn_fee = 1; - - // 3,1 - all_ops_no_majority[0][1].parent_block_ptr = 2; - all_ops_no_majority[0][1].parent_vtxindex = 0; - all_ops_no_majority[0][1].vtxindex = 1; - all_ops_no_majority[0][1].burn_fee = 1; - - // 4,0 - all_ops_no_majority[1][0].parent_block_ptr = 3; - all_ops_no_majority[1][0].parent_vtxindex = 0; - all_ops_no_majority[1][0].vtxindex = 0; - all_ops_no_majority[1][0].burn_fee = 2; - - // 4,1 - all_ops_no_majority[1][1].parent_block_ptr = 3; - all_ops_no_majority[1][1].parent_vtxindex = 0; - all_ops_no_majority[1][1].vtxindex = 1; - all_ops_no_majority[1][1].burn_fee = 2; - - // 5,0 - all_ops_no_majority[2][0].parent_block_ptr = 4; - all_ops_no_majority[2][0].parent_vtxindex = 1; - all_ops_no_majority[2][0].vtxindex = 0; - all_ops_no_majority[2][0].burn_fee = 3; - - // 5,1 - all_ops_no_majority[2][1].parent_block_ptr = 4; - all_ops_no_majority[2][1].parent_vtxindex = 1; - all_ops_no_majority[2][1].vtxindex = 1; - all_ops_no_majority[2][1].burn_fee = 3; - - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_no_majority, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_some()); - let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); - - assert_eq!( - commits[1][0].as_ref().unwrap(), - &heaviest_parent_block_commit - ); - assert_eq!( - descendancy, - vec![vec![true, true], vec![true, true], vec![true, true]] - ); - - // make a history with two miners' commits, with some invalid commits. 
- // The heavier commit descendancy wins -- 2,1 is the anchor block. - // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 <--- 5,0 (winner) - // \ - // `---- 2,1 <--- 3,1 <--- 4,1 <--- 5,1 - let mut all_ops_no_majority = filtered_ops.clone(); - - // 3,0 - all_ops_no_majority[0][0].parent_block_ptr = 2; - all_ops_no_majority[0][0].parent_vtxindex = 0; - all_ops_no_majority[0][0].vtxindex = 0; - all_ops_no_majority[0][0].burn_fee = 1; - - // 3,1 - all_ops_no_majority[0][1].parent_block_ptr = 2; - all_ops_no_majority[0][1].parent_vtxindex = 1; - all_ops_no_majority[0][1].vtxindex = 1; - all_ops_no_majority[0][1].burn_fee = 1; - - // 4,0 - all_ops_no_majority[1][0].parent_block_ptr = 3; - all_ops_no_majority[1][0].parent_vtxindex = 0; - all_ops_no_majority[1][0].vtxindex = 0; - all_ops_no_majority[1][0].burn_fee = 2; - - // 4,1 - all_ops_no_majority[1][1].parent_block_ptr = 3; - all_ops_no_majority[1][1].parent_vtxindex = 1; - all_ops_no_majority[1][1].vtxindex = 1; - all_ops_no_majority[1][1].burn_fee = 2; - - // 5,0 - all_ops_no_majority[2][0].parent_block_ptr = 4; - all_ops_no_majority[2][0].parent_vtxindex = 0; - all_ops_no_majority[2][0].vtxindex = 0; - all_ops_no_majority[2][0].burn_fee = 4; - - // 5,1 - all_ops_no_majority[2][1].parent_block_ptr = 4; - all_ops_no_majority[2][1].parent_vtxindex = 1; - all_ops_no_majority[2][1].vtxindex = 1; - all_ops_no_majority[2][1].burn_fee = 3; - - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_no_majority, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_some()); - let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); - - // best option wins - assert_eq!( - commits[1][0].as_ref().unwrap(), - &heaviest_parent_block_commit - ); - assert_eq!( - descendancy, - vec![vec![true, false], vec![true, false], vec![true, false]] - ); - - // make a history with two miners' commits, with some invalid 
commits. - // commit descendancy weight is a tie, so highest commit is the anchor block (2,1) - // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 <--- 5,0 - // \ - // `---- 2,1 <--- 3,1 <--- 4,1 <--- 5,1 (winner) - let mut all_ops_no_majority = filtered_ops.clone(); - - // 3,0 - all_ops_no_majority[0][0].parent_block_ptr = 2; - all_ops_no_majority[0][0].parent_vtxindex = 0; - all_ops_no_majority[0][0].vtxindex = 0; - all_ops_no_majority[0][0].burn_fee = 1; - - // 3,1 - all_ops_no_majority[0][1].parent_block_ptr = 2; - all_ops_no_majority[0][1].parent_vtxindex = 1; - all_ops_no_majority[0][1].vtxindex = 1; - all_ops_no_majority[0][1].burn_fee = 1; - - // 4,0 - all_ops_no_majority[1][0].parent_block_ptr = 3; - all_ops_no_majority[1][0].parent_vtxindex = 0; - all_ops_no_majority[1][0].vtxindex = 0; - all_ops_no_majority[1][0].burn_fee = 2; - - // 4,1 - all_ops_no_majority[1][1].parent_block_ptr = 3; - all_ops_no_majority[1][1].parent_vtxindex = 1; - all_ops_no_majority[1][1].vtxindex = 1; - all_ops_no_majority[1][1].burn_fee = 2; - - // 5,0 - all_ops_no_majority[2][0].parent_block_ptr = 4; - all_ops_no_majority[2][0].parent_vtxindex = 0; - all_ops_no_majority[2][0].vtxindex = 0; - all_ops_no_majority[2][0].burn_fee = 3; - - // 5,1 - all_ops_no_majority[2][1].parent_block_ptr = 4; - all_ops_no_majority[2][1].parent_vtxindex = 1; - all_ops_no_majority[2][1].vtxindex = 1; - all_ops_no_majority[2][1].burn_fee = 3; - - let heaviest_parent_commit_opt = find_heaviest_block_commit( - &burnchain_db.tx_begin().unwrap(), - &headers, - &all_ops_no_majority, - burnchain.pox_constants.anchor_threshold, - ) - .unwrap(); - assert!(heaviest_parent_commit_opt.is_some()); - let (heaviest_parent_block_commit, descendancy) = heaviest_parent_commit_opt.unwrap(); - - // best option wins - assert_eq!( - commits[1][1].as_ref().unwrap(), - &heaviest_parent_block_commit - ); - assert_eq!( - descendancy, - vec![vec![false, true], vec![false, true], vec![false, true]] - ); - } - - #[test] - fn 
test_update_pox_affirmation_maps_3_forks() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits_0) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![None], - ); - - // no anchor blocks recorded, yet! - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=0: before update: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("n").unwrap()); - - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_none()); - - update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); - - // there's only one anchor block - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - 
.is_some()); - - // the anchor block itself affirms nothing, since it isn't built on an anchor block - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=0: after update: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); - - let anchor_block_0 = BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .unwrap() - .0; - eprintln!("anchor block 1 at height {}", anchor_block_0.block_height); - assert!(anchor_block_0.block_height < commits_0[7][0].as_ref().unwrap().block_height); - - // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block - let (next_headers, commits_1) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[7][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); - - // there's two anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=1: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); - assert_eq!(canonical_am, 
AffirmationMap::decode("pp").unwrap()); - - // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block but not rc - // 1's - assert!(anchor_block_0.block_height < commits_0[6][0].as_ref().unwrap().block_height); - let (next_headers, commits_2) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[6][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); - - // there's three anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_some()); - - // there are two equivalently heavy affirmation maps, but the affirmation map discovered later - // is the heaviest. 
- let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=2: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("pa").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("pap").unwrap()); - - // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block, but not rc - // 1's or rc 2's - assert!(anchor_block_0.block_height < commits_0[8][0].as_ref().unwrap().block_height); - let (next_headers, commits_3) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[8][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); - - // there are three equivalently heavy affirmation maps, but the affirmation map discovered last - // is the heaviest. 
- let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=3: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("paa").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("paap").unwrap()); - } - - #[test] - fn test_update_pox_affirmation_maps_unique_anchor_block() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits_0) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![None], - ); - - // no anchor blocks recorded, yet! 
- let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=0: before update: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("n").unwrap()); - - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_none()); - - update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); - - // there's only one anchor block - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - - // the anchor block itself affirms nothing, since it isn't built on an anchor block - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=0: after update: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); - - let anchor_block_0 = BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .unwrap() - .0; - eprintln!("anchor block 1 at height {}", anchor_block_0.block_height); - assert!(anchor_block_0.block_height < commits_0[7][0].as_ref().unwrap().block_height); - - // try and select the same anchor block, twice - let mut dup_commits = commits_0.clone(); - for (i, cmts) in 
dup_commits.iter_mut().enumerate() { - let block_header = BurnchainBlockHeader { - block_height: (i + commits_0.len() + 1) as u64, - block_hash: next_burn_header_hash(), - parent_block_hash: headers - .last() - .map(|blk| blk.block_hash.clone()) - .unwrap_or(first_bhh.clone()), - num_txs: cmts.len() as u64, - timestamp: (i + commits_0.len()) as u64, - }; - - for cmt_opt in cmts.iter_mut() { - if let Some(cmt) = cmt_opt.as_mut() { - cmt.block_height = block_header.block_height; - cmt.parent_block_ptr = anchor_block_0.block_height as u32; - cmt.parent_vtxindex = anchor_block_0.vtxindex as u16; - cmt.burn_parent_modulus = - ((cmt.block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8; - cmt.burn_header_hash = block_header.block_hash.clone(); - cmt.block_header_hash = next_block_hash(); - } - } - - headers.push(block_header.clone()); - - let cmt_ops: Vec = cmts - .iter() - .filter_map(|op| op.clone()) - .map(|op| BlockstackOperationType::LeaderBlockCommit(op)) - .collect(); - - burnchain_db - .store_new_burnchain_block_ops_unchecked( - &burnchain, - &headers, - &block_header, - &cmt_ops, - ) - .unwrap(); - } - - update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); - - // there's still only one anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_none()); - - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=1: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, 
AffirmationMap::decode("pn").unwrap()); - } - - #[test] - fn test_update_pox_affirmation_maps_absent() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // make two histories -- one with an anchor block, and one without. - let (next_headers, commits_0) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![None, None], - ); - - // no anchor blocks recorded, yet! 
- let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - assert_eq!(heaviest_am, AffirmationMap::empty()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_none()); - - update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); - - // there's only one anchor block, and it's at vtxindex 1 (not 0) - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert_eq!( - BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .unwrap() - .0 - .vtxindex, - 1 - ); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_none()); - - // the anchor block itself affirms nothing - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=0: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); - - for i in 5..10 { - let block_commit = BurnchainDB::get_block_commit( - burnchain_db.conn(), - &commits_0[i][0].as_ref().unwrap().txid, - ) - .unwrap() - .unwrap(); - assert_eq!(block_commit.vtxindex, 0); - - let block_commit_metadata = BurnchainDB::get_commit_metadata( - burnchain_db.conn(), - &block_commit.burn_header_hash, - &block_commit.txid, - ) - .unwrap() - .unwrap(); - assert_eq!(block_commit_metadata.anchor_block_descendant, None); - } - - // build a second reward cycle off of a commit that does _not_ affirm the first anchor - // block - let (next_headers, commits_1) = make_reward_cycle( - &mut 
burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[9][1].clone(), commits_0[9][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); - - // the second anchor block affirms that the first anchor block is missing. - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=1: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("ap").unwrap()); - - // build a third reward cycle off of a commit in the second reward cycle, but make it so - // that there is no anchor block mined - let (next_headers, commits_2) = make_reward_cycle_without_anchor( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_1[9][0].clone(), commits_1[9][1].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); - - // there isn't a third anchor block - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - - // heaviest _anchor block_ affirmation map is unchanged. 
- let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=2: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("apn").unwrap()); - - // build a fourth reward cycle off of a commit in the third reward cycle, but make it so - // that there is no anchor block mined - assert!(commits_2[5][0].is_some()); - assert!(commits_2[5][1].is_some()); - let (next_headers, commits_3) = make_reward_cycle_without_anchor( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_2[5][0].clone(), commits_2[5][1].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); - - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_none()); - - // heaviest _anchor block_ affirmation map is unchanged. 
- let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=3: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("apnn").unwrap()); - - // make a fourth fifth cycle, again with a missing anchor block - assert!(commits_3[5][0].is_some()); - assert!(commits_3[5][1].is_some()); - let (next_headers, commits_4) = make_reward_cycle_without_anchor( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_3[5][0].clone(), commits_3[5][1].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 4, &burnchain).unwrap(); - - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) - .unwrap() - .is_none()); - - // heaviest _anchor block_ affirmation map advances - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=4: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("apnnn").unwrap()); - - // make a fifth reward cycle, but with an anchor block. 
Affirms the first anchor block by - // descending from a chain that descends from it. - assert!(commits_4[5][0].is_some()); - assert!(commits_4[5][1].is_some()); - let (next_headers, commits_5) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_4[5][1].clone(), commits_4[5][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 5, &burnchain).unwrap(); - - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 5) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 6) - .unwrap() - .is_some()); - - // heaviest _anchor block_ affirmation map advances, since the new anchor block affirms the - // last 4 reward cycles, including the anchor block mined in the first reward cycle - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=5: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - // anchor block was chosen in the last reward cycle, and in doing so created the heaviest - // affirmation map for an anchor block, so the canonical affirmation map is - // whatever that last anchor block affirmed - assert_eq!(heaviest_am, AffirmationMap::decode("pannn").unwrap()); - assert_eq!(canonical_am, 
AffirmationMap::decode("pannnp").unwrap()); - - // make a third history that affirms _nothing_. It should eventually overtake this last - // heaviest affirmation map - let mut start = vec![commits_0[3][1].clone()]; - for i in 0..6 { - let (next_headers, commits) = make_reward_cycle_with_vote( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - start, - false, - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 6 + i, &burnchain).unwrap(); - start = vec![commits[5][0].clone()]; - - let heaviest_am = BurnchainDB::get_heaviest_anchor_block_affirmation_map( - burnchain_db.conn(), - &burnchain, - ) - .unwrap(); - let canonical_am = BurnchainDB::get_canonical_affirmation_map( - burnchain_db.conn(), - &burnchain, - |_, _| true, - ) - .unwrap(); - eprintln!( - "rc={}: heaviest = {}, canonical = {}", - 6 + i, - &heaviest_am, - &canonical_am - ); - } - - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=11: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("pannn").unwrap()); - assert_eq!( - canonical_am, - AffirmationMap::decode("pannnpnnnnnn").unwrap() - ); - - // other affirmation map should be present - let unaffirmed_am = AffirmationMap::decode("aannnannnnnn").unwrap(); - let am_id = BurnchainDB::get_affirmation_map_id(burnchain_db.conn(), &unaffirmed_am) - .unwrap() - .unwrap(); - let weight = BurnchainDB::get_affirmation_weight(burnchain_db.conn(), am_id) - .unwrap() - .unwrap(); - assert_eq!(weight, 9); - } - - #[test] - fn test_update_pox_affirmation_maps_nothing() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = 
PoxConstants::new(10, 5, 3, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits_0) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![None], - ); - - // no anchor blocks recorded, yet! - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - assert_eq!(heaviest_am, AffirmationMap::empty()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_none()); - - update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); - - // there's only one anchor block - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - - // the anchor block itself affirms nothing, since it isn't built on an anchor block - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=0: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, 
AffirmationMap::decode("p").unwrap()); - - // build a second reward cycle off of the first, but with no anchor block - let (next_headers, commits_1) = make_reward_cycle_with_vote( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[9][0].clone()], - false, - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); - - // there's still one anchor block - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_none()); - - // second reward cycle doesn't have an anchor block, so there's no heaviest anchor block - // affirmation map yet - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=1: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("pn").unwrap()); - - // build a 3rd reward cycle, but it affirms an anchor block - let last_commit_1 = { - let mut last_commit = None; - for i in 0..commits_1.len() { - if commits_1[i][0].is_some() { - last_commit = commits_1[i][0].clone(); - } - } - last_commit - }; - - let (next_headers, commits_2) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![last_commit_1], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); - - // there's two anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - 
.unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) - .unwrap() - .is_none()); - - // there's no anchor block in rc 1 - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=2: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("pnp").unwrap()); - - // build a fourth reward cycle, with no vote - let (next_headers, commits_3) = make_reward_cycle_with_vote( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_2[9][0].clone()], - false, - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); - - // there are three equivalently heavy affirmation maps, but the affirmation map discovered last - // is the heaviest. BUT THIS TIME, MAKE THE UNCONFIRMED ORACLE DENY THAT THIS LAST - // ANCHORED BLOCK EXISTS. 
- let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - false - }) - .unwrap(); - eprintln!( - "rc=3 (deny): heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("pnan").unwrap()); - - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=3 (exist): heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("pnpn").unwrap()); - } - - #[test] - fn test_update_pox_affirmation_fork_2_cycles() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits_0) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![None], - ); - - // no anchor blocks recorded, yet! 
- let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - assert_eq!(heaviest_am, AffirmationMap::empty()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_none()); - - update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); - - // there's only one anchor block - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - - // the anchor block itself affirms nothing, since it isn't built on an anchor block - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=0 (true): heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); - - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - false - }) - .unwrap(); - eprintln!( - "rc=0 (false): heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(canonical_am, AffirmationMap::decode("a").unwrap()); - - // build a second reward cycle off of the first - let (next_headers, commits_1) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[4][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); - - // there's two anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - 
assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - - // the network affirms two anchor blocks, but the second anchor block only affirms the - // first anchor block. - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=1 (true): heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("pp").unwrap()); - - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - false - }) - .unwrap(); - eprintln!( - "rc=1 (false): heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(canonical_am, AffirmationMap::decode("pa").unwrap()); - - // build a third reward cycle off of the first, before the 2nd's anchor block - let (next_headers, commits_2) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[1][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); - - // there's four anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_some()); - - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - 
BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=2 (true): heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("ppp").unwrap()); - - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - false - }) - .unwrap(); - eprintln!( - "rc=2 (false): heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(canonical_am, AffirmationMap::decode("paa").unwrap()); - - // build a fourth reward cycle off of the third - let (next_headers, commits_3) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_2[4][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); - - // there's four anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) - .unwrap() - .is_some()); - - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=3: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("aap").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("aapp").unwrap()); - } - - #[test] - fn 
test_update_pox_affirmation_fork_duel() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 0; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); - - // first reward cycle is all (linear) commits, so it must elect an anchor block - let (next_headers, commits_0) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![None], - ); - - // no anchor blocks recorded, yet! - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - assert_eq!(heaviest_am, AffirmationMap::empty()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_none()); - - update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); - - // there's only one anchor block - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - - // the anchor block itself affirms nothing, since it isn't built on an anchor block - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - 
true - }) - .unwrap(); - eprintln!( - "rc=0: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); - - // build a second reward cycle off of the first, but at the start - assert!(commits_0[1][0].is_some()); - let (next_headers, commits_1) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[1][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); - - // there's two anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - - // the network affirms two anchor blocks, but the second one wins - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=1: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("ap").unwrap()); - - // build a third reward cycle off of the first - assert!(commits_0[4][0].clone().unwrap().block_height == 5); - let (next_headers, commits_2) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_0[4][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); - - // there's four anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - 
assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_some()); - - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=2: heaviest = {}, canonical = {}", - &heaviest_am, &canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("pa").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("pap").unwrap()); - - // build a fourth reward cycle off of the second - assert!(commits_1[4][0].clone().unwrap().block_height == 10); - let (next_headers, commits_3) = make_reward_cycle( - &mut burnchain_db, - &burnchain, - &key_register, - &mut headers, - vec![commits_1[4][0].clone()], - ); - update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); - - // there's four anchor blocks - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) - .unwrap() - .is_none()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) - .unwrap() - .is_some()); - assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) - .unwrap() - .is_some()); - - let heaviest_am = - BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) - .unwrap(); - let canonical_am = - BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { - true - }) - .unwrap(); - eprintln!( - "rc=3: heaviest = {}, canonical = {}", - &heaviest_am, 
&canonical_am - ); - - assert_eq!(heaviest_am, AffirmationMap::decode("apa").unwrap()); - assert_eq!(canonical_am, AffirmationMap::decode("apap").unwrap()); - } -} diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index 4b0e083010..2a311e4671 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -877,7 +877,7 @@ impl Burnchain { if this_reward_cycle != prev_reward_cycle { // at reward cycle boundary - debug!( + info!( "Update PoX affirmation maps for reward cycle {} ({}) block {} cycle-length {}", prev_reward_cycle, this_reward_cycle, @@ -1460,1147 +1460,3 @@ impl Burnchain { Ok(block_header) } } - -#[cfg(test)] -pub mod tests { - use ed25519_dalek::Keypair as VRFKeypair; - use rand::rngs::ThreadRng; - use rand::thread_rng; - use serde::Serialize; - use sha2::Sha512; - - use crate::types::chainstate::StacksAddress; - use crate::types::proof::TrieHash; - use address::AddressHashMode; - use burnchains::affirmation::*; - use burnchains::bitcoin::address::*; - use burnchains::bitcoin::keys::BitcoinPublicKey; - use burnchains::bitcoin::*; - use burnchains::Txid; - use burnchains::*; - use chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleTx}; - use chainstate::burn::distribution::BurnSamplePoint; - use chainstate::burn::operations::{ - leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS, BlockstackOperationType, - LeaderBlockCommitOp, LeaderKeyRegisterOp, UserBurnSupportOp, - }; - use chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash}; - use chainstate::stacks::StacksPublicKey; - use util::db::Error as db_error; - use util::get_epoch_time_secs; - use util::hash::hex_bytes; - use util::hash::to_hex; - use util::hash::Hash160; - use util::log; - use util::secp256k1::Secp256k1PrivateKey; - use util::uint::BitArray; - use util::uint::Uint256; - use util::uint::Uint512; - use util::vrf::VRFPrivateKey; - use util::vrf::VRFPublicKey; - - use crate::types::chainstate::{ - BlockHeaderHash, 
BurnchainHeaderHash, PoxId, SortitionId, VRFSeed, - }; - - #[test] - fn test_process_block_ops() { - let first_burn_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000123", - ) - .unwrap(); - let first_block_height = 120; - - let burnchain = Burnchain { - pox_constants: PoxConstants::test_default(), - peer_version: 0x012345678, - network_id: 0x9abcdef0, - chain_name: "bitcoin".to_string(), - network_name: "testnet".to_string(), - working_dir: "/nope".to_string(), - consensus_hash_lifetime: 24, - stable_confirmations: 7, - first_block_height, - initial_reward_start_block: first_block_height, - first_block_timestamp: 0, - first_block_hash: BurnchainHeaderHash::zero(), - }; - let first_burn_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000123", - ) - .unwrap(); - let block_121_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000012", - ) - .unwrap(); - let block_122_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000002", - ) - .unwrap(); - let block_123_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(); - let block_124_hash_initial = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000004", - ) - .unwrap(); - - let leader_key_1 = LeaderKeyRegisterOp { - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a") - .unwrap(), - ) - .unwrap(), - memo: vec![01, 02, 03, 04, 05], - address: StacksAddress::from_bitcoin_address( - &BitcoinAddress::from_scriptpubkey( - BitcoinNetworkType::Testnet, - &hex_bytes("76a914306231b2782b5f80d944bf69f9d46a1453a0a0eb88ac").unwrap(), - 
) - .unwrap(), - ), - - txid: Txid::from_bytes( - &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") - .unwrap(), - ) - .unwrap(), - vtxindex: 456, - block_height: 123, - burn_header_hash: block_123_hash.clone(), - }; - - let leader_key_2 = LeaderKeyRegisterOp { - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c") - .unwrap(), - ) - .unwrap(), - memo: vec![01, 02, 03, 04, 05], - address: StacksAddress::from_bitcoin_address( - &BitcoinAddress::from_scriptpubkey( - BitcoinNetworkType::Testnet, - &hex_bytes("76a914306231b2782b5f80d944bf69f9d46a1453a0a0eb88ac").unwrap(), - ) - .unwrap(), - ), - - txid: Txid::from_bytes( - &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") - .unwrap(), - ) - .unwrap(), - vtxindex: 457, - block_height: 122, - burn_header_hash: block_122_hash.clone(), - }; - - let leader_key_3 = LeaderKeyRegisterOp { - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("de8af7037e522e65d2fe2d63fb1b764bfea829df78b84444338379df13144a02") - .unwrap(), - ) - .unwrap(), - memo: vec![01, 02, 03, 04, 05], - address: StacksAddress::from_bitcoin_address( - &BitcoinAddress::from_scriptpubkey( - BitcoinNetworkType::Testnet, - &hex_bytes("76a914f464a593895cd58c74a7352dd4a65c491d0c0bf688ac").unwrap(), - ) - .unwrap(), - ), - - txid: Txid::from_bytes( - &hex_bytes("eb54704f71d4a2d1128d60ffccced547054b52250ada6f3e7356165714f44d4c") - .unwrap(), - ) - .unwrap(), - vtxindex: 10, - block_height: 121, - burn_header_hash: block_121_hash.clone(), - }; - - let user_burn_1 = UserBurnSupportOp { - address: StacksAddress::new(1, Hash160([1u8; 20])), - consensus_hash: ConsensusHash::from_bytes( - 
&hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a") - .unwrap(), - ) - .unwrap(), - block_header_hash_160: Hash160::from_bytes( - &hex_bytes("7150f635054b87df566a970b21e07030d6444bf2").unwrap(), - ) - .unwrap(), // 22222....2222 - key_block_ptr: 123, - key_vtxindex: 456, - burn_fee: 10000, - - txid: Txid::from_bytes( - &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716b") - .unwrap(), - ) - .unwrap(), - vtxindex: 13, - block_height: 124, - burn_header_hash: block_124_hash_initial.clone(), - }; - - let user_burn_1_2 = UserBurnSupportOp { - address: StacksAddress::new(2, Hash160([2u8; 20])), - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a") - .unwrap(), - ) - .unwrap(), - block_header_hash_160: Hash160::from_bytes( - &hex_bytes("7150f635054b87df566a970b21e07030d6444bf2").unwrap(), - ) - .unwrap(), // 22222....2222 - key_block_ptr: 123, - key_vtxindex: 456, - burn_fee: 30000, - - txid: Txid::from_bytes( - &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716c") - .unwrap(), - ) - .unwrap(), - vtxindex: 14, - block_height: 124, - burn_header_hash: block_124_hash_initial.clone(), - }; - - let user_burn_2 = UserBurnSupportOp { - address: StacksAddress::new(3, Hash160([3u8; 20])), - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c") - .unwrap(), - ) - .unwrap(), - block_header_hash_160: Hash160::from_bytes( - &hex_bytes("037a1e860899a4fa823c18b66f6264d20236ec58").unwrap(), - ) - 
.unwrap(), // 22222....2223 - key_block_ptr: 122, - key_vtxindex: 457, - burn_fee: 20000, - - txid: Txid::from_bytes( - &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716d") - .unwrap(), - ) - .unwrap(), - vtxindex: 15, - block_height: 124, - burn_header_hash: block_124_hash_initial.clone(), - }; - - let user_burn_2_2 = UserBurnSupportOp { - address: StacksAddress::new(4, Hash160([4u8; 20])), - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c") - .unwrap(), - ) - .unwrap(), - block_header_hash_160: Hash160::from_bytes( - &hex_bytes("037a1e860899a4fa823c18b66f6264d20236ec58").unwrap(), - ) - .unwrap(), // 22222....2223 - key_block_ptr: 122, - key_vtxindex: 457, - burn_fee: 40000, - - txid: Txid::from_bytes( - &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716e") - .unwrap(), - ) - .unwrap(), - vtxindex: 16, - block_height: 124, - burn_header_hash: block_124_hash_initial.clone(), - }; - - // should be rejected - let user_burn_noblock = UserBurnSupportOp { - address: StacksAddress::new(5, Hash160([5u8; 20])), - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a") - .unwrap(), - ) - .unwrap(), - block_header_hash_160: Hash160::from_bytes( - &hex_bytes("3333333333333333333333333333333333333333").unwrap(), - ) - .unwrap(), - key_block_ptr: 122, - key_vtxindex: 772, - burn_fee: 12345, - - txid: Txid::from_bytes( - &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716f") - .unwrap(), - ) - .unwrap(), - vtxindex: 12, - block_height: 123, - burn_header_hash: block_123_hash.clone(), - }; - - // should be rejected - let 
user_burn_nokey = UserBurnSupportOp { - address: StacksAddress::new(6, Hash160([6u8; 20])), - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("0000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes("3f3338db51f2b1f6ac0cf6177179a24ee130c04ef2f9849a64a216969ab60e70") - .unwrap(), - ) - .unwrap(), - block_header_hash_160: Hash160::from_bytes( - &hex_bytes("037a1e860899a4fa823c18b66f6264d20236ec58").unwrap(), - ) - .unwrap(), - key_block_ptr: 122, - key_vtxindex: 457, - burn_fee: 12345, - - txid: Txid::from_bytes( - &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c7170") - .unwrap(), - ) - .unwrap(), - vtxindex: 15, - block_height: 123, - burn_header_hash: block_123_hash.clone(), - }; - - let block_commit_1 = LeaderBlockCommitOp { - sunset_burn: 0, - commit_outs: vec![], - block_header_hash: BlockHeaderHash::from_bytes( - &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222") - .unwrap(), - ) - .unwrap(), - new_seed: VRFSeed::from_bytes( - &hex_bytes("3333333333333333333333333333333333333333333333333333333333333333") - .unwrap(), - ) - .unwrap(), - parent_block_ptr: 0, - parent_vtxindex: 0, - key_block_ptr: 123, - key_vtxindex: 456, - memo: vec![0x80], - - burn_fee: 12345, - input: (Txid([0; 32]), 0), - apparent_sender: BurnchainSigner { - public_keys: vec![StacksPublicKey::from_hex( - "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", - ) - .unwrap()], - num_sigs: 1, - hash_mode: AddressHashMode::SerializeP2PKH, - }, - - txid: Txid::from_bytes( - &hex_bytes("3c07a0a93360bc85047bbaadd49e30c8af770f73a37e10fec400174d2e5f27cf") - .unwrap(), - ) - .unwrap(), - vtxindex: 444, - block_height: 124, - burn_parent_modulus: (123 % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: block_124_hash_initial.clone(), - }; - - let block_commit_2 = LeaderBlockCommitOp { - sunset_burn: 0, - commit_outs: vec![], - block_header_hash: 
BlockHeaderHash::from_bytes( - &hex_bytes("2222222222222222222222222222222222222222222222222222222222222223") - .unwrap(), - ) - .unwrap(), - new_seed: VRFSeed::from_bytes( - &hex_bytes("3333333333333333333333333333333333333333333333333333333333333334") - .unwrap(), - ) - .unwrap(), - parent_block_ptr: 0, - parent_vtxindex: 0, - key_block_ptr: 122, - key_vtxindex: 457, - memo: vec![0x80], - - burn_fee: 12345, - input: (Txid([0; 32]), 0), - apparent_sender: BurnchainSigner { - public_keys: vec![StacksPublicKey::from_hex( - "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", - ) - .unwrap()], - num_sigs: 1, - hash_mode: AddressHashMode::SerializeP2PKH, - }, - - txid: Txid::from_bytes( - &hex_bytes("3c07a0a93360bc85047bbaadd49e30c8af770f73a37e10fec400174d2e5f27d0") - .unwrap(), - ) - .unwrap(), - vtxindex: 445, - block_height: 124, - burn_parent_modulus: (123 % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: block_124_hash_initial.clone(), - }; - - let block_commit_3 = LeaderBlockCommitOp { - sunset_burn: 0, - commit_outs: vec![], - block_header_hash: BlockHeaderHash::from_bytes( - &hex_bytes("2222222222222222222222222222222222222222222222222222222222222224") - .unwrap(), - ) - .unwrap(), - new_seed: VRFSeed::from_bytes( - &hex_bytes("3333333333333333333333333333333333333333333333333333333333333335") - .unwrap(), - ) - .unwrap(), - parent_block_ptr: 0, - parent_vtxindex: 0, - key_block_ptr: 121, - key_vtxindex: 10, - memo: vec![0x80], - - burn_fee: 23456, - input: (Txid([0; 32]), 0), - apparent_sender: BurnchainSigner { - public_keys: vec![StacksPublicKey::from_hex( - "0283d603abdd2392646dbdd0dc80beb39c25bfab96a8a921ea5e7517ce533f8cd5", - ) - .unwrap()], - num_sigs: 1, - hash_mode: AddressHashMode::SerializeP2PKH, - }, - - txid: Txid::from_bytes( - &hex_bytes("301dc687a9f06a1ae87a013f27133e9cec0843c2983567be73e185827c7c13de") - .unwrap(), - ) - .unwrap(), - vtxindex: 446, - block_height: 124, - burn_parent_modulus: (123 % 
BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: block_124_hash_initial.clone(), - }; - - let block_ops_121: Vec = - vec![BlockstackOperationType::LeaderKeyRegister( - leader_key_3.clone(), - )]; - let block_opshash_121 = OpsHash::from_txids(&vec![leader_key_3.txid.clone()]); - let block_prev_chs_121 = - vec![ConsensusHash::from_hex("0000000000000000000000000000000000000000").unwrap()]; - let mut block_121_snapshot = BlockSnapshot { - accumulated_coinbase_ustx: 0, - pox_valid: true, - block_height: 121, - burn_header_hash: block_121_hash.clone(), - sortition_id: SortitionId(block_121_hash.0.clone()), - parent_sortition_id: SortitionId(block_121_hash.0.clone()), - burn_header_timestamp: 121, - parent_burn_header_hash: first_burn_hash.clone(), - ops_hash: block_opshash_121.clone(), - consensus_hash: ConsensusHash::from_ops( - &block_121_hash, - &block_opshash_121, - 0, - &block_prev_chs_121, - &PoxId::stubbed(), - ), - total_burn: 0, - sortition: false, - sortition_hash: SortitionHash::initial().mix_burn_header(&block_121_hash), - winning_block_txid: Txid::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - winning_stacks_block_hash: BlockHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - index_root: TrieHash::from_empty_data(), // TBD - num_sortitions: 0, - stacks_block_accepted: false, - stacks_block_height: 0, - arrival_index: 0, - canonical_stacks_tip_height: 0, - canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), - canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), - ..BlockSnapshot::initial(0, &first_burn_hash, 0) - }; - - let block_ops_122: Vec = - vec![BlockstackOperationType::LeaderKeyRegister( - leader_key_2.clone(), - )]; - let block_opshash_122 = OpsHash::from_txids(&vec![leader_key_2.txid.clone()]); - let block_prev_chs_122 = vec![ - block_121_snapshot.consensus_hash.clone(), - 
ConsensusHash::from_hex("0000000000000000000000000000000000000000").unwrap(), - ]; - let mut block_122_snapshot = BlockSnapshot { - accumulated_coinbase_ustx: 0, - pox_valid: true, - block_height: 122, - burn_header_hash: block_122_hash.clone(), - sortition_id: SortitionId(block_122_hash.0.clone()), - parent_sortition_id: block_121_snapshot.sortition_id.clone(), - burn_header_timestamp: 122, - parent_burn_header_hash: block_121_hash.clone(), - ops_hash: block_opshash_122.clone(), - consensus_hash: ConsensusHash::from_ops( - &block_122_hash, - &block_opshash_122, - 0, - &block_prev_chs_122, - &PoxId::stubbed(), - ), - total_burn: 0, - sortition: false, - sortition_hash: SortitionHash::initial() - .mix_burn_header(&block_121_hash) - .mix_burn_header(&block_122_hash), - winning_block_txid: Txid::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - winning_stacks_block_hash: BlockHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - index_root: TrieHash::from_empty_data(), // TBD - num_sortitions: 0, - stacks_block_accepted: false, - stacks_block_height: 0, - arrival_index: 0, - canonical_stacks_tip_height: 0, - canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), - canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), - ..BlockSnapshot::initial(0, &first_burn_hash, 0) - }; - - let block_ops_123: Vec = vec![ - BlockstackOperationType::UserBurnSupport(user_burn_noblock.clone()), - BlockstackOperationType::UserBurnSupport(user_burn_nokey.clone()), - BlockstackOperationType::LeaderKeyRegister(leader_key_1.clone()), - ]; - let block_opshash_123 = OpsHash::from_txids(&vec![ - // notably, the user burns here _wont_ be included in the consensus hash - leader_key_1.txid.clone(), - ]); - let block_prev_chs_123 = vec![ - block_122_snapshot.consensus_hash.clone(), - block_121_snapshot.consensus_hash.clone(), - ]; - let mut block_123_snapshot = BlockSnapshot { - 
accumulated_coinbase_ustx: 0, - pox_valid: true, - block_height: 123, - burn_header_hash: block_123_hash.clone(), - sortition_id: SortitionId(block_123_hash.0.clone()), - parent_sortition_id: block_122_snapshot.sortition_id.clone(), - burn_header_timestamp: 123, - parent_burn_header_hash: block_122_hash.clone(), - ops_hash: block_opshash_123.clone(), - consensus_hash: ConsensusHash::from_ops( - &block_123_hash, - &block_opshash_123, - 0, - &block_prev_chs_123, - &PoxId::stubbed(), - ), // user burns not included, so zero burns this block - total_burn: 0, - sortition: false, - sortition_hash: SortitionHash::initial() - .mix_burn_header(&block_121_hash) - .mix_burn_header(&block_122_hash) - .mix_burn_header(&block_123_hash), - winning_block_txid: Txid::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - winning_stacks_block_hash: BlockHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - index_root: TrieHash::from_empty_data(), // TBD - num_sortitions: 0, - stacks_block_accepted: false, - stacks_block_height: 0, - arrival_index: 0, - canonical_stacks_tip_height: 0, - canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), - canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), - ..BlockSnapshot::initial(0, &first_burn_hash, 0) - }; - - // multiple possibilities for block 124 -- we'll reorg the chain each time back to 123 and - // re-try block 124 to test them all. 
- let block_ops_124_possibilities = vec![ - vec![BlockstackOperationType::LeaderBlockCommit( - block_commit_1.clone(), - )], - vec![ - BlockstackOperationType::LeaderBlockCommit(block_commit_1.clone()), - BlockstackOperationType::LeaderBlockCommit(block_commit_2.clone()), - BlockstackOperationType::LeaderBlockCommit(block_commit_3.clone()), - ], - vec![ - BlockstackOperationType::LeaderBlockCommit(block_commit_1.clone()), - BlockstackOperationType::LeaderBlockCommit(block_commit_2.clone()), - BlockstackOperationType::LeaderBlockCommit(block_commit_3.clone()), - ], - ]; - - let block_124_winners = vec![ - block_commit_1.clone(), - block_commit_3.clone(), - block_commit_1.clone(), - ]; - - let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); - - // NOTE: the .txs() method will NOT be called, so we can pass an empty vec![] here - let block121 = BurnchainBlock::Bitcoin(BitcoinBlock::new( - 121, - &block_121_hash, - &first_burn_hash, - &vec![], - 121, - )); - let block122 = BurnchainBlock::Bitcoin(BitcoinBlock::new( - 122, - &block_122_hash, - &block_121_hash, - &vec![], - 122, - )); - let block123 = BurnchainBlock::Bitcoin(BitcoinBlock::new( - 123, - &block_123_hash, - &block_122_hash, - &vec![], - 123, - )); - - let initial_snapshot = BlockSnapshot::initial( - first_block_height, - &first_burn_hash, - first_block_height as u64, - ); - - // process up to 124 - { - let header = block121.header(); - let mut tx = SortitionHandleTx::begin(&mut db, &initial_snapshot.sortition_id).unwrap(); - - let (sn121, _) = tx - .process_block_ops( - &burnchain, - &initial_snapshot, - &header, - block_ops_121, - None, - PoxId::stubbed(), - None, - 0, - ) - .unwrap(); - tx.commit().unwrap(); - - block_121_snapshot.index_root = sn121.index_root.clone(); - block_121_snapshot.parent_sortition_id = sn121.parent_sortition_id.clone(); - assert_eq!(sn121, block_121_snapshot); - } - { - let header = block122.header(); - let mut tx = - 
SortitionHandleTx::begin(&mut db, &block_121_snapshot.sortition_id).unwrap(); - - let (sn122, _) = tx - .process_block_ops( - &burnchain, - &block_121_snapshot, - &header, - block_ops_122, - None, - PoxId::stubbed(), - None, - 0, - ) - .unwrap(); - tx.commit().unwrap(); - - block_122_snapshot.index_root = sn122.index_root.clone(); - block_122_snapshot.parent_sortition_id = sn122.parent_sortition_id.clone(); - assert_eq!(sn122, block_122_snapshot); - } - { - let header = block123.header(); - let mut tx = - SortitionHandleTx::begin(&mut db, &block_122_snapshot.sortition_id).unwrap(); - let (sn123, _) = tx - .process_block_ops( - &burnchain, - &block_122_snapshot, - &header, - block_ops_123, - None, - PoxId::stubbed(), - None, - 0, - ) - .unwrap(); - tx.commit().unwrap(); - - block_123_snapshot.index_root = sn123.index_root.clone(); - block_123_snapshot.parent_sortition_id = sn123.parent_sortition_id.clone(); - assert_eq!(sn123, block_123_snapshot); - } - - for scenario_idx in 0..block_ops_124_possibilities.len() { - let mut block_ops_124 = block_ops_124_possibilities[scenario_idx].clone(); - let mut block_124_hash_bytes = block_124_hash_initial.as_bytes().clone(); - block_124_hash_bytes[0] = (scenario_idx + 1) as u8; - let block_124_hash = BurnchainHeaderHash(block_124_hash_bytes); - - for op in block_ops_124.iter_mut() { - op.set_burn_header_hash(block_124_hash.clone()); - } - - // everything will be included - let block_opshash_124 = OpsHash::from_txids( - &block_ops_124 - .clone() - .into_iter() - .map(|bo| bo.txid()) - .collect(), - ); - let block_prev_chs_124 = vec![ - block_123_snapshot.consensus_hash.clone(), - block_122_snapshot.consensus_hash.clone(), - ConsensusHash::from_hex("0000000000000000000000000000000000000000").unwrap(), - ]; - - let burn_total = block_ops_124.iter().fold(0u64, |mut acc, op| { - let bf = match op { - BlockstackOperationType::LeaderBlockCommit(ref op) => op.burn_fee, - BlockstackOperationType::UserBurnSupport(ref op) => 0, - _ => 0, 
- }; - acc += bf; - acc - }); - - let next_sortition = block_ops_124.len() > 0 && burn_total > 0; - - let mut block_124_snapshot = BlockSnapshot { - accumulated_coinbase_ustx: 400_000_000, - pox_valid: true, - block_height: 124, - burn_header_hash: block_124_hash.clone(), - sortition_id: SortitionId(block_124_hash.0.clone()), - parent_sortition_id: block_123_snapshot.sortition_id.clone(), - burn_header_timestamp: 124, - parent_burn_header_hash: block_123_snapshot.burn_header_hash.clone(), - ops_hash: block_opshash_124.clone(), - consensus_hash: ConsensusHash::from_ops( - &block_124_hash, - &block_opshash_124, - burn_total, - &block_prev_chs_124, - &PoxId::stubbed(), - ), - total_burn: burn_total, - sortition: next_sortition, - sortition_hash: SortitionHash::initial() - .mix_burn_header(&block_121_hash) - .mix_burn_header(&block_122_hash) - .mix_burn_header(&block_123_hash) - .mix_burn_header(&block_124_hash), - winning_block_txid: block_124_winners[scenario_idx].txid.clone(), - winning_stacks_block_hash: block_124_winners[scenario_idx] - .block_header_hash - .clone(), - index_root: TrieHash::from_empty_data(), // TDB - num_sortitions: if next_sortition { 1 } else { 0 }, - stacks_block_accepted: false, - stacks_block_height: 0, - arrival_index: 0, - canonical_stacks_tip_height: 0, - canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), - canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), - ..BlockSnapshot::initial(0, &first_burn_hash, 0) - }; - - if next_sortition { - block_124_snapshot.sortition_hash = block_124_snapshot - .sortition_hash - .mix_VRF_seed(&block_124_winners[scenario_idx].new_seed); - } - - let block124 = BurnchainBlock::Bitcoin(BitcoinBlock::new( - 124, - &block_124_hash, - &block_123_hash, - &vec![], - 124, - )); - - // process this scenario - let sn124 = { - let header = block124.header(); - let mut tx = - SortitionHandleTx::begin(&mut db, &block_123_snapshot.sortition_id).unwrap(); - let (sn124, _) = tx - .process_block_ops( - 
&burnchain, - &block_123_snapshot, - &header, - block_ops_124, - None, - PoxId::stubbed(), - None, - 0, - ) - .unwrap(); - tx.commit().unwrap(); - - block_124_snapshot.index_root = sn124.index_root.clone(); - block_124_snapshot.parent_sortition_id = sn124.parent_sortition_id.clone(); - sn124 - }; - - assert_eq!(sn124, block_124_snapshot); - - // get all winning block commit hashes. - // There should only be two -- the winning block at height 124, and the genesis - // sentinel block hash. This is because epochs 121, 122, and 123 don't have any block - // commits. - let expected_winning_hashes = vec![ - BlockHeaderHash([0u8; 32]), - block_124_winners[scenario_idx].block_header_hash.clone(), - ]; - - // TODO: pair up with stacks chain state? - /* - let winning_header_hashes = { - let mut tx = db.tx_begin().unwrap(); - BurnDB::get_stacks_block_header_inventory(&mut tx, 124).unwrap() - .iter() - .map(|ref hinv| hinv.0.clone()) - .collect() - }; - - assert_eq!(expected_winning_hashes, winning_header_hashes); - */ - } - } - - #[test] - fn test_burn_snapshot_sequence() { - let first_burn_hash = BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000123", - ) - .unwrap(); - let first_block_height = 120; - - let burnchain = Burnchain { - pox_constants: PoxConstants::test_default(), - peer_version: 0x012345678, - network_id: 0x9abcdef0, - chain_name: "bitcoin".to_string(), - network_name: "testnet".to_string(), - working_dir: "/nope".to_string(), - consensus_hash_lifetime: 24, - stable_confirmations: 7, - first_block_timestamp: 0, - first_block_hash: first_burn_hash, - first_block_height, - initial_reward_start_block: first_block_height, - }; - - let mut leader_private_keys = vec![]; - let mut leader_public_keys = vec![]; - let mut leader_bitcoin_public_keys = vec![]; - let mut leader_bitcoin_addresses = vec![]; - - for i in 0..32 { - let mut csprng: ThreadRng = thread_rng(); - let keypair: VRFKeypair = VRFKeypair::generate(&mut 
csprng); - - let privkey_hex = to_hex(&keypair.secret.to_bytes()); - leader_private_keys.push(privkey_hex); - - let pubkey_hex = to_hex(&keypair.public.to_bytes()); - leader_public_keys.push(pubkey_hex); - - let bitcoin_privkey = Secp256k1PrivateKey::new(); - let bitcoin_publickey = BitcoinPublicKey::from_private(&bitcoin_privkey); - - leader_bitcoin_public_keys.push(to_hex(&bitcoin_publickey.to_bytes())); - - let btc_input = BitcoinTxInput { - in_type: BitcoinInputType::Standard, - keys: vec![bitcoin_publickey.clone()], - num_required: 1, - tx_ref: (Txid([0; 32]), 0), - }; - - leader_bitcoin_addresses.push( - BitcoinAddress::from_bytes( - BitcoinNetworkType::Testnet, - BitcoinAddressType::PublicKeyHash, - &btc_input.to_address_bits(), - ) - .unwrap(), - ); - } - - let mut expected_burn_total: u64 = 0; - - // insert all operations - let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); - let mut prev_snapshot = BlockSnapshot::initial( - first_block_height, - &first_burn_hash, - first_block_height as u64, - ); - let mut all_stacks_block_hashes = vec![]; - - for i in 0..32 { - let mut block_ops = vec![]; - let burn_block_hash = BurnchainHeaderHash::from_bytes(&vec![ - i + 1, - i + 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - i + 1, - ]) - .unwrap(); - let parent_burn_block_hash = prev_snapshot.burn_header_hash.clone(); - let parent_index_root = prev_snapshot.index_root.clone(); - - // insert block commit paired to previous round's leader key, as well as a user burn - if i > 0 { - let next_block_commit = LeaderBlockCommitOp { - sunset_burn: 0, - commit_outs: vec![], - block_header_hash: BlockHeaderHash::from_bytes(&vec![ - i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]) - .unwrap(), - new_seed: VRFSeed::from_bytes(&vec![ - i, i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]) - .unwrap(), - parent_block_ptr: (if i == 1 { - 0 - } else { - first_block_height + (i as u64) - }) as u32, - parent_vtxindex: (if i == 1 { 0 } else { 2 * (i - 1) }) as u16, - key_block_ptr: (first_block_height + (i as u64)) as u32, - key_vtxindex: (2 * (i - 1) + 1) as u16, - memo: vec![i], - - burn_fee: i as u64, - input: (Txid([0; 32]), 0), - apparent_sender: BurnchainSigner { - public_keys: vec![StacksPublicKey::from_hex( - &leader_bitcoin_public_keys[(i - 1) as usize].clone(), - ) - .unwrap()], - num_sigs: 1, - hash_mode: AddressHashMode::SerializeP2PKH, - }, - - txid: Txid::from_bytes(&vec![ - i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, i, - ]) - .unwrap(), - vtxindex: (2 * i) as u32, - block_height: first_block_height + ((i + 1) as u64), - burn_parent_modulus: ((first_block_height + (i as u64)) - % BURN_BLOCK_MINED_AT_MODULUS) - as u8, - burn_header_hash: burn_block_hash.clone(), - }; - - all_stacks_block_hashes.push(next_block_commit.block_header_hash.clone()); - block_ops.push(BlockstackOperationType::LeaderBlockCommit( - next_block_commit, - )); - } - - let ch = { - let ic = db.index_handle(&prev_snapshot.sortition_id); - ic.get_consensus_at((i as u64) + first_block_height) - .unwrap() - .unwrap_or(ConsensusHash::empty()) - }; - - let next_leader_key = LeaderKeyRegisterOp { - consensus_hash: ch.clone(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes(&leader_public_keys[i as usize]).unwrap(), - ) - .unwrap(), - memo: vec![0, 0, 0, 0, i], - address: StacksAddress::from_bitcoin_address( - &leader_bitcoin_addresses[i as usize].clone(), - ), - - txid: Txid::from_bytes(&vec![ - i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - ]) - .unwrap(), - vtxindex: (2 * i + 1) as u32, - block_height: first_block_height + (i + 1) as u64, - burn_header_hash: burn_block_hash.clone(), - }; - - 
block_ops.push(BlockstackOperationType::LeaderKeyRegister(next_leader_key)); - - let block = BurnchainBlock::Bitcoin(BitcoinBlock::new( - first_block_height + (i + 1) as u64, - &burn_block_hash, - &parent_burn_block_hash, - &vec![], - get_epoch_time_secs(), - )); - - // process this block - let snapshot = { - let header = block.header(); - let mut tx = - SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); - let (sn, _) = tx - .process_block_ops( - &burnchain, - &prev_snapshot, - &header, - block_ops, - None, - PoxId::stubbed(), - None, - 0, - ) - .unwrap(); - tx.commit().unwrap(); - sn - }; - - if i > 0 { - expected_burn_total += i as u64; - - assert_eq!(snapshot.total_burn, expected_burn_total); - assert_eq!( - snapshot.winning_block_txid, - Txid::from_bytes(&vec![ - i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, i - ]) - .unwrap() - ); - assert_eq!( - snapshot.winning_stacks_block_hash, - BlockHeaderHash::from_bytes(&vec![ - i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0 - ]) - .unwrap() - ); - assert_eq!(snapshot.burn_header_hash, burn_block_hash); - assert_eq!(snapshot.parent_burn_header_hash, parent_burn_block_hash); - assert_eq!(snapshot.block_height, (i as u64) + 1 + first_block_height); - assert!(snapshot.sortition); - } else { - assert!(!snapshot.sortition); - assert_eq!(snapshot.total_burn, 0); - } - - prev_snapshot = snapshot; - } - } -} diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index bc9687c31a..a75317e416 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -192,6 +192,8 @@ impl FromRow for BlockstackOperationType { } } +pub const BURNCHAIN_DB_VERSION: &'static str = "2"; + const BURNCHAIN_DB_SCHEMA: &'static str = r#" CREATE TABLE burnchain_db_block_headers ( block_height INTEGER NOT NULL, @@ -216,6 +218,7 @@ CREATE TABLE affirmation_maps ( weight INTEGER NOT NULL, affirmation_map TEXT NOT NULL ); +CREATE 
INDEX affirmation_maps_index ON affirmation_maps(affirmation_map); -- ensure anchor block uniqueness CREATE TABLE anchor_blocks ( @@ -243,6 +246,8 @@ CREATE TABLE overrides ( affirmation_map TEXT NOT NULL ); +CREATE TABLE db_config(version TEXT NOT NULL); + INSERT INTO affirmation_maps(affirmation_id,weight,affirmation_map) VALUES (0,0,""); -- empty affirmation map INSERT INTO anchor_blocks(reward_cycle) VALUES (9223372036854775807); -- non-existant reward cycle (i64::MAX) "#; @@ -264,7 +269,7 @@ impl<'a> BurnchainDBTransaction<'a> { ]; match self.sql_tx.execute(sql, args) { Ok(_) => Ok(self.sql_tx.last_insert_rowid()), - Err(e) => Err(BurnchainError::from(e)), + Err(e) => Err(e.into()), } } @@ -327,7 +332,7 @@ impl<'a> BurnchainDBTransaction<'a> { ]; match self.sql_tx.execute(sql, args) { Ok(_) => { - test_debug!( + info!( "Set anchor block for reward cycle {} to {},{},{},{}", target_reward_cycle, &block_commit.burn_header_hash, @@ -448,8 +453,7 @@ impl<'a> BurnchainDBTransaction<'a> { commit.block_height, commit.vtxindex ); - self.update_block_commit_affirmation(commit, None, 0) - .map_err(|e| BurnchainError::from(e))?; + self.update_block_commit_affirmation(commit, None, 0)?; } } else { if commit.parent_block_ptr == 0 && commit.parent_vtxindex == 0 { @@ -468,8 +472,7 @@ impl<'a> BurnchainDBTransaction<'a> { commit.vtxindex ); } - self.update_block_commit_affirmation(commit, None, 0) - .map_err(|e| BurnchainError::from(e))?; + self.update_block_commit_affirmation(commit, None, 0)?; } } @@ -509,14 +512,14 @@ impl<'a> BurnchainDBTransaction<'a> { Some(p) => p, None => { if block_commit.parent_block_ptr == 0 && block_commit.vtxindex == 0 { - test_debug!( + debug!( "Prepare-phase commit {},{},{} builds off of genesis", &block_commit.block_header_hash, block_commit.block_height, block_commit.vtxindex ); } else { - test_debug!( + debug!( "Prepare-phase commit {},{},{} has no parent, so must be invalid", &block_commit.block_header_hash, block_commit.block_height, @@ 
-531,20 +534,24 @@ impl<'a> BurnchainDBTransaction<'a> { BurnchainDB::get_commit_metadata(&self.sql_tx, &parent.burn_header_hash, &parent.txid)? .expect("BUG: no metadata found for parent block-commit"); - let (am, affirmed_reward_cycle) = if let Some(ab) = anchor_block { - let anchor_am_id = BurnchainDB::get_block_commit_affirmation_id(&self.sql_tx, &ab)? - .expect("BUG: anchor block has no affirmation map"); + let (am, affirmed_reward_cycle) = if let Some(anchor_block) = anchor_block { + let anchor_am_id = + BurnchainDB::get_block_commit_affirmation_id(&self.sql_tx, &anchor_block)? + .expect("BUG: anchor block has no affirmation map"); - let mut am = BurnchainDB::get_affirmation_map(&self.sql_tx, anchor_am_id) - .map_err(|e| BurnchainError::from(e))? + let mut am = BurnchainDB::get_affirmation_map(&self.sql_tx, anchor_am_id)? .ok_or(BurnchainError::DBError(DBError::NotFoundError))?; if descends_from_anchor_block { - test_debug!("Prepare-phase commit {},{},{} descends from anchor block {},{},{} for reward cycle {}", &block_commit.block_header_hash, block_commit.block_height, block_commit.vtxindex, &ab.block_header_hash, ab.block_height, ab.vtxindex, reward_cycle); + test_debug!("Prepare-phase commit {},{},{} descends from anchor block {},{},{} for reward cycle {}", + &block_commit.block_header_hash, block_commit.block_height, block_commit.vtxindex, &anchor_block.block_header_hash, anchor_block.block_height, anchor_block.vtxindex, reward_cycle); + am.push(AffirmationMapEntry::PoxAnchorBlockPresent); (am, Some(reward_cycle)) } else { - test_debug!("Prepare-phase commit {},{},{} does NOT descend from anchor block {},{},{} for reward cycle {}", &block_commit.block_header_hash, block_commit.block_height, block_commit.vtxindex, &ab.block_header_hash, ab.block_height, ab.vtxindex, reward_cycle); + test_debug!("Prepare-phase commit {},{},{} does NOT descend from anchor block {},{},{} for reward cycle {}", + &block_commit.block_header_hash, block_commit.block_height, 
block_commit.vtxindex, &anchor_block.block_header_hash, anchor_block.block_height, anchor_block.vtxindex, reward_cycle); + am.push(AffirmationMapEntry::PoxAnchorBlockAbsent); (am, parent_metadata.anchor_block_descendant) } @@ -619,9 +626,8 @@ impl<'a> BurnchainDBTransaction<'a> { } } - test_debug!( - "Prepare-phase commit {},{},{} affirms parent {},{} with {} descended from {:?}", - &block_commit.block_header_hash, + debug!( + "Prepare-phase commit {},{} affirms parent {},{} with {} descended from {:?}", block_commit.block_height, block_commit.vtxindex, parent.block_height, @@ -633,9 +639,7 @@ impl<'a> BurnchainDBTransaction<'a> { (am, parent_rc_opt) }; - if let Some(am_id) = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &am) - .map_err(|e| BurnchainError::from(e))? - { + if let Some(am_id) = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &am)? { // child doesn't represent any new affirmations by the network, since its // affirmation map already exists. if cfg!(test) { @@ -646,18 +650,14 @@ impl<'a> BurnchainDBTransaction<'a> { &block_commit.txid, block_commit.block_height, block_commit.vtxindex, block_commit.parent_block_ptr, block_commit.parent_vtxindex, &am, _am_weight, &affirmed_reward_cycle); } - self.update_block_commit_affirmation(block_commit, affirmed_reward_cycle, am_id) - .map_err(|e| BurnchainError::from(e))?; + self.update_block_commit_affirmation(block_commit, affirmed_reward_cycle, am_id)?; Ok(am_id) } else { test_debug!("Affirmation map of prepare-phase block-commit {},{},{} (parent {},{}) is new: {:?} weight {} affirmed {:?}", &block_commit.txid, block_commit.block_height, block_commit.vtxindex, block_commit.parent_block_ptr, block_commit.parent_vtxindex, &am, am.weight(), &affirmed_reward_cycle); - let am_id = self - .insert_block_commit_affirmation_map(&am) - .map_err(|e| BurnchainError::from(e))?; - self.update_block_commit_affirmation(block_commit, affirmed_reward_cycle, am_id) - .map_err(|e| BurnchainError::from(e))?; + let am_id = 
self.insert_block_commit_affirmation_map(&am)?; + self.update_block_commit_affirmation(block_commit, affirmed_reward_cycle, am_id)?; Ok(am_id) } } @@ -750,9 +750,7 @@ impl<'a> BurnchainDBTransaction<'a> { (am, None) }; - if let Some(am_id) = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &am) - .map_err(|e| BurnchainError::from(e))? - { + if let Some(am_id) = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &am)? { // child doesn't represent any new affirmations by the network, since its // affirmation map already exists. if cfg!(test) { @@ -767,22 +765,20 @@ impl<'a> BurnchainDBTransaction<'a> { block_commit, affirmed_anchor_block_reward_cycle, am_id, - ) - .map_err(|e| BurnchainError::from(e))?; + )?; Ok(am_id) } else { test_debug!("Affirmation map of reward-phase block-commit {},{},{} (parent {},{}) is new: {:?} weight {}", &block_commit.txid, block_commit.block_height, block_commit.vtxindex, block_commit.parent_block_ptr, block_commit.parent_vtxindex, &am, am.weight()); - let am_id = self - .insert_block_commit_affirmation_map(&am) - .map_err(|e| BurnchainError::from(e))?; + let am_id = self.insert_block_commit_affirmation_map(&am)?; + self.update_block_commit_affirmation( block_commit, affirmed_anchor_block_reward_cycle, am_id, - ) - .map_err(|e| BurnchainError::from(e))?; + )?; + Ok(am_id) } } @@ -851,9 +847,7 @@ impl<'a> BurnchainDBTransaction<'a> { } pub fn get_canonical_chain_tip(&self) -> Result { - let qry = "SELECT * FROM burnchain_db_block_headers ORDER BY block_height DESC, block_hash ASC LIMIT 1"; - let opt = query_row(&self.sql_tx, qry, NO_PARAMS)?; - Ok(opt.expect("CORRUPTION: No canonical burnchain tip")) + BurnchainDB::inner_get_canonical_chain_tip(&self.sql_tx) } /// You'd only do this in network emergencies, where node operators are expected to declare an @@ -931,6 +925,10 @@ impl BurnchainDB { let db_tx = db.tx_begin()?; sql_pragma(&db_tx.sql_tx, "PRAGMA journal_mode = WAL;")?; db_tx.sql_tx.execute_batch(BURNCHAIN_DB_SCHEMA)?; + 
db_tx.sql_tx.execute( + "INSERT INTO db_config (version) VALUES (?1)", + &[&BURNCHAIN_DB_VERSION], + )?; let first_block_header = BurnchainBlockHeader { block_height: burnchain.first_block_height, @@ -1521,577 +1519,3 @@ impl BurnchainDB { Ok(heaviest_am) } } - -#[cfg(test)] -pub mod tests { - use std::convert::TryInto; - - use address::*; - use burnchains::bitcoin::address::*; - use burnchains::bitcoin::blocks::*; - use burnchains::bitcoin::*; - use burnchains::PoxConstants; - use burnchains::BLOCKSTACK_MAGIC_MAINNET; - use chainstate::burn::*; - use chainstate::coordinator::tests::*; - use chainstate::stacks::*; - use deps::bitcoin::blockdata::transaction::Transaction as BtcTx; - use deps::bitcoin::network::serialize::deserialize; - use util::hash::*; - - use crate::types::chainstate::StacksAddress; - use crate::types::chainstate::VRFSeed; - - use super::*; - - fn make_tx(hex_str: &str) -> BtcTx { - let tx_bin = hex_bytes(hex_str).unwrap(); - deserialize(&tx_bin.to_vec()).unwrap() - } - - impl BurnchainHeaderReader for Vec { - fn read_burnchain_headers( - &self, - start_height: u64, - end_height: u64, - ) -> Result, DBError> { - if start_height >= self.len() as u64 { - return Ok(vec![]); - } - let end = cmp::min(end_height, self.len() as u64) as usize; - Ok(self[(start_height as usize)..end].to_vec()) - } - - fn get_burnchain_headers_height(&self) -> Result { - Ok(self.len() as u64) - } - } - - #[test] - fn test_store_and_fetch() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 321; - let first_height = 1; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::test_default(); - burnchain.pox_constants.sunset_start = 999; - burnchain.pox_constants.sunset_end = 1000; - - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, 
true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - assert_eq!(&first_block_header.block_hash, &first_bhh); - assert_eq!(&first_block_header.block_height, &first_height); - assert_eq!(first_block_header.timestamp, first_timestamp as u64); - assert_eq!( - &first_block_header.parent_block_hash, - &BurnchainHeaderHash::sentinel() - ); - - let headers = vec![first_block_header.clone()]; - let canon_hash = BurnchainHeaderHash([1; 32]); - - let canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( - 500, - &canon_hash, - &first_bhh, - &vec![], - 485, - )); - let ops = burnchain_db - .store_new_burnchain_block(&burnchain, &headers, &canonical_block) - .unwrap(); - assert_eq!(ops.len(), 0); - - let vtxindex = 1; - let noncanon_block_height = 400; - let non_canon_hash = BurnchainHeaderHash([2; 32]); - - let fixtures = operations::leader_key_register::tests::get_test_fixtures( - vtxindex, - noncanon_block_height, - non_canon_hash, - ); - - let parser = BitcoinBlockParser::new(BitcoinNetworkType::Testnet, BLOCKSTACK_MAGIC_MAINNET); - let mut broadcast_ops = vec![]; - let mut expected_ops = vec![]; - - for (ix, tx_fixture) in fixtures.iter().enumerate() { - let tx = make_tx(&tx_fixture.txstr); - let burnchain_tx = parser.parse_tx(&tx, ix + 1).unwrap(); - if let Some(res) = &tx_fixture.result { - let mut res = res.clone(); - res.vtxindex = (ix + 1).try_into().unwrap(); - expected_ops.push(res.clone()); - } - broadcast_ops.push(burnchain_tx); - } - - let non_canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( - 400, - &non_canon_hash, - &first_bhh, - &broadcast_ops, - 350, - )); - - let ops = burnchain_db - .store_new_burnchain_block(&burnchain, &headers, &non_canonical_block) - .unwrap(); - assert_eq!(ops.len(), expected_ops.len()); - for op in ops.iter() { - let expected_op = expected_ops - .iter() - .find(|candidate| candidate.txid == op.txid()) - .expect("FAILED to find parsed op in expected ops"); - if let 
BlockstackOperationType::LeaderKeyRegister(op) = op { - assert_eq!(op, expected_op); - } else { - panic!("EXPECTED to parse a LeaderKeyRegister"); - } - } - - let BurnchainBlockData { header, ops } = - BurnchainDB::get_burnchain_block(&burnchain_db.conn, &non_canon_hash).unwrap(); - assert_eq!(ops.len(), expected_ops.len()); - for op in ops.iter() { - let expected_op = expected_ops - .iter() - .find(|candidate| candidate.txid == op.txid()) - .expect("FAILED to find parsed op in expected ops"); - if let BlockstackOperationType::LeaderKeyRegister(op) = op { - assert_eq!(op, expected_op); - } else { - panic!("EXPECTED to parse a LeaderKeyRegister"); - } - } - assert_eq!(&header, &non_canonical_block.header()); - - let looked_up_canon = burnchain_db.get_canonical_chain_tip().unwrap(); - assert_eq!(&looked_up_canon, &canonical_block.header()); - - let BurnchainBlockData { header, ops } = - BurnchainDB::get_burnchain_block(&burnchain_db.conn, &canon_hash).unwrap(); - assert_eq!(ops.len(), 0); - assert_eq!(&header, &looked_up_canon); - } - - #[test] - fn test_classify_stack_stx() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 321; - let first_height = 1; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::test_default(); - burnchain.pox_constants.sunset_start = 999; - burnchain.pox_constants.sunset_end = 1000; - - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - assert_eq!(&first_block_header.block_hash, &first_bhh); - assert_eq!(&first_block_header.block_height, &first_height); - assert_eq!(first_block_header.timestamp, first_timestamp as u64); - assert_eq!( - &first_block_header.parent_block_hash, - &BurnchainHeaderHash::sentinel() - ); 
- - let canon_hash = BurnchainHeaderHash([1; 32]); - let mut headers = vec![first_block_header.clone()]; - - let canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( - 500, - &canon_hash, - &first_bhh, - &vec![], - 485, - )); - let ops = burnchain_db - .store_new_burnchain_block(&burnchain, &headers, &canonical_block) - .unwrap(); - assert_eq!(ops.len(), 0); - - // let's mine a block with a pre-stack-stx tx, and a stack-stx tx, - // the stack-stx tx should _fail_ to verify, because there's no - // corresponding pre-stack-stx. - - let parser = BitcoinBlockParser::new(BitcoinNetworkType::Testnet, BLOCKSTACK_MAGIC_MAINNET); - - let pre_stack_stx_0_txid = Txid([5; 32]); - let pre_stack_stx_0 = BitcoinTransaction { - txid: pre_stack_stx_0_txid.clone(), - vtxindex: 0, - opcode: Opcodes::PreStx as u8, - data: vec![0; 80], - data_amt: 0, - inputs: vec![BitcoinTxInput { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 1), - }], - outputs: vec![BitcoinTxOutput { - units: 10, - address: BitcoinAddress { - addrtype: BitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }, - }], - }; - - // this one will not have a corresponding pre_stack_stx tx. - let stack_stx_0 = BitcoinTransaction { - txid: Txid([4; 32]), - vtxindex: 1, - opcode: Opcodes::StackStx as u8, - data: vec![1; 80], - data_amt: 0, - inputs: vec![BitcoinTxInput { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 1), - }], - outputs: vec![BitcoinTxOutput { - units: 10, - address: BitcoinAddress { - addrtype: BitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }, - }], - }; - - // this one will have a corresponding pre_stack_stx tx. 
- let stack_stx_0_second_attempt = BitcoinTransaction { - txid: Txid([4; 32]), - vtxindex: 2, - opcode: Opcodes::StackStx as u8, - data: vec![1; 80], - data_amt: 0, - inputs: vec![BitcoinTxInput { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (pre_stack_stx_0_txid.clone(), 1), - }], - outputs: vec![BitcoinTxOutput { - units: 10, - address: BitcoinAddress { - addrtype: BitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([2; 20]), - }, - }], - }; - - // this one won't have a corresponding pre_stack_stx tx. - let stack_stx_1 = BitcoinTransaction { - txid: Txid([3; 32]), - vtxindex: 3, - opcode: Opcodes::StackStx as u8, - data: vec![1; 80], - data_amt: 0, - inputs: vec![BitcoinTxInput { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 1), - }], - outputs: vec![BitcoinTxOutput { - units: 10, - address: BitcoinAddress { - addrtype: BitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }, - }], - }; - - // this one won't use the correct output - let stack_stx_2 = BitcoinTransaction { - txid: Txid([8; 32]), - vtxindex: 4, - opcode: Opcodes::StackStx as u8, - data: vec![1; 80], - data_amt: 0, - inputs: vec![BitcoinTxInput { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (pre_stack_stx_0_txid.clone(), 2), - }], - outputs: vec![BitcoinTxOutput { - units: 10, - address: BitcoinAddress { - addrtype: BitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }, - }], - }; - - let ops_0 = vec![pre_stack_stx_0, stack_stx_0]; - - let ops_1 = vec![stack_stx_1, stack_stx_0_second_attempt, stack_stx_2]; - - let block_height_0 = 501; - let block_hash_0 = BurnchainHeaderHash([2; 32]); - let block_height_1 = 502; - let block_hash_1 = BurnchainHeaderHash([3; 32]); - - let block_0 = BurnchainBlock::Bitcoin(BitcoinBlock::new( 
- block_height_0, - &block_hash_0, - &first_bhh, - &ops_0, - 350, - )); - - headers.push(BurnchainBlockHeader { - block_height: first_block_header.block_height + 1, - block_hash: block_hash_0.clone(), - parent_block_hash: first_bhh.clone(), - num_txs: ops_0.len() as u64, - timestamp: first_block_header.timestamp + 1, - }); - - let block_1 = BurnchainBlock::Bitcoin(BitcoinBlock::new( - block_height_1, - &block_hash_1, - &block_hash_0, - &ops_1, - 360, - )); - - headers.push(BurnchainBlockHeader { - block_height: first_block_header.block_height + 2, - block_hash: block_hash_1.clone(), - parent_block_hash: block_hash_0.clone(), - num_txs: ops_1.len() as u64, - timestamp: first_block_header.timestamp + 2, - }); - - let processed_ops_0 = burnchain_db - .store_new_burnchain_block(&burnchain, &headers, &block_0) - .unwrap(); - - assert_eq!( - processed_ops_0.len(), - 1, - "Only pre_stack_stx op should have been accepted" - ); - - let processed_ops_1 = burnchain_db - .store_new_burnchain_block(&burnchain, &headers, &block_1) - .unwrap(); - - assert_eq!( - processed_ops_1.len(), - 1, - "Only one stack_stx op should have been accepted" - ); - - let expected_pre_stack_addr = StacksAddress::from_bitcoin_address(&BitcoinAddress { - addrtype: BitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }); - - let expected_reward_addr = StacksAddress::from_bitcoin_address(&BitcoinAddress { - addrtype: BitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([2; 20]), - }); - - if let BlockstackOperationType::PreStx(op) = &processed_ops_0[0] { - assert_eq!(&op.output, &expected_pre_stack_addr); - } else { - panic!("EXPECTED to parse a pre stack stx op"); - } - - if let BlockstackOperationType::StackStx(op) = &processed_ops_1[0] { - assert_eq!(&op.sender, &expected_pre_stack_addr); - assert_eq!(&op.reward_addr, &expected_reward_addr); - assert_eq!(op.stacked_ustx, u128::from_be_bytes([1; 16])); - 
assert_eq!(op.num_cycles, 1); - } else { - panic!("EXPECTED to parse a stack stx op"); - } - } - - pub fn make_simple_block_commit( - burnchain: &Burnchain, - parent: Option<&LeaderBlockCommitOp>, - burn_header: &BurnchainBlockHeader, - block_hash: BlockHeaderHash, - ) -> LeaderBlockCommitOp { - let block_height = burn_header.block_height; - let mut new_op = LeaderBlockCommitOp { - sunset_burn: 0, - block_header_hash: block_hash, - new_seed: VRFSeed([1u8; 32]), - parent_block_ptr: 0, - parent_vtxindex: 0, - key_block_ptr: 0, - key_vtxindex: 0, - memo: vec![0], - - commit_outs: vec![ - StacksAddress { - version: 26, - bytes: Hash160::empty(), - }, - StacksAddress { - version: 26, - bytes: Hash160::empty(), - }, - ], - - burn_fee: 10000, - input: (next_txid(), 0), - apparent_sender: BurnchainSigner { - public_keys: vec![StacksPublicKey::from_hex( - "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", - ) - .unwrap()], - num_sigs: 1, - hash_mode: AddressHashMode::SerializeP2PKH, - }, - - txid: next_txid(), - vtxindex: 0, - block_height: block_height, - burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: burn_header.block_hash.clone(), - }; - - if burnchain.is_in_prepare_phase(block_height) { - new_op.commit_outs = vec![StacksAddress { - version: 26, - bytes: Hash160::empty(), - }]; - } - - if let Some(ref op) = parent { - new_op.parent_block_ptr = op.block_height as u32; - new_op.parent_vtxindex = op.vtxindex as u16; - }; - - new_op - } - - #[test] - fn test_get_commit_at() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 0; - let first_height = 1; - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); - burnchain.first_block_height = first_height; - burnchain.first_block_hash = first_bhh.clone(); - burnchain.first_block_timestamp = first_timestamp; - - let mut burnchain_db = BurnchainDB::connect(":memory:", 
&burnchain, true).unwrap(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - - let mut headers = vec![first_block_header.clone()]; - let mut parent = None; - let mut parent_block_header: Option = None; - let mut cmts = vec![]; - - for i in 0..5 { - let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); - let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, - block_hash: hdr, - parent_block_hash: parent_block_header - .as_ref() - .map(|blk| blk.block_hash.clone()) - .unwrap_or(first_block_header.block_hash.clone()), - num_txs: 1, - timestamp: i as u64, - }; - - headers.push(block_header.clone()); - parent_block_header = Some(block_header); - } - - for i in 0..5 { - let block_header = &headers[i + 1]; - - let cmt = make_simple_block_commit( - &burnchain, - parent.as_ref(), - block_header, - BlockHeaderHash([((i + 1) as u8) | 0x80; 32]), - ); - burnchain_db - .store_new_burnchain_block_ops_unchecked( - &burnchain, - &headers, - block_header, - &vec![BlockstackOperationType::LeaderBlockCommit(cmt.clone())], - ) - .unwrap(); - - cmts.push(cmt.clone()); - parent = Some(cmt); - } - - for i in 0..5 { - let cmt = BurnchainDB::get_commit_at( - &burnchain_db.conn(), - &headers, - (first_height + i) as u32, - 0, - ) - .unwrap() - .unwrap(); - assert_eq!(cmt, cmts[i as usize]); - } - - let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), &headers, 5, 0) - .unwrap() - .unwrap(); - assert_eq!(cmt, cmts[4]); - - // fork off the last stored commit block - let fork_hdr = BurnchainHeaderHash([90 as u8; 32]); - let fork_block_header = BurnchainBlockHeader { - block_height: 4, - block_hash: fork_hdr, - parent_block_hash: BurnchainHeaderHash([5 as u8; 32]), - num_txs: 0, - timestamp: 4 as u64, - }; - - burnchain_db - .store_new_burnchain_block_ops_unchecked( - &burnchain, - &headers, - &fork_block_header, - &vec![], - ) - .unwrap(); - headers[4] = fork_block_header; - - let cmt = 
BurnchainDB::get_commit_at(&burnchain_db.conn(), &headers, 4, 0).unwrap(); - assert!(cmt.is_none()); - } -} diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index 2abe835385..46fc25f306 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -60,6 +60,9 @@ pub mod burnchain; pub mod db; pub mod indexer; +#[cfg(test)] +pub mod tests; + #[derive(Serialize, Deserialize)] pub struct Txid(pub [u8; 32]); impl_array_newtype!(Txid, u8, 32); @@ -600,1130 +603,3 @@ impl BurnchainView { self.last_burn_block_hashes = ret; } } - -#[cfg(test)] -pub mod test { - use std::collections::HashMap; - - use address::*; - use burnchains::bitcoin::indexer::BitcoinIndexer; - use burnchains::db::*; - use burnchains::Burnchain; - use burnchains::*; - use chainstate::burn::db::sortdb::*; - use chainstate::burn::operations::BlockstackOperationType; - use chainstate::burn::operations::*; - use chainstate::burn::*; - use chainstate::coordinator::comm::*; - use chainstate::coordinator::*; - use chainstate::stacks::*; - use util::db::*; - use util::get_epoch_time_secs; - use util::hash::*; - use util::secp256k1::*; - use util::vrf::*; - - use crate::types::chainstate::{BlockHeaderHash, SortitionId, VRFSeed}; - - use super::*; - - impl Txid { - pub fn from_test_data( - block_height: u64, - vtxindex: u32, - burn_header_hash: &BurnchainHeaderHash, - noise: u64, - ) -> Txid { - let mut bytes = vec![]; - bytes.extend_from_slice(&block_height.to_be_bytes()); - bytes.extend_from_slice(&vtxindex.to_be_bytes()); - bytes.extend_from_slice(burn_header_hash.as_bytes()); - bytes.extend_from_slice(&noise.to_be_bytes()); - let h = DoubleSha256::from_data(&bytes[..]); - let mut hb = [0u8; 32]; - hb.copy_from_slice(h.as_bytes()); - - Txid(hb) - } - } - - impl BurnchainHeaderHash { - pub fn from_test_data( - block_height: u64, - index_root: &TrieHash, - noise: u64, - ) -> BurnchainHeaderHash { - let mut bytes = vec![]; - bytes.extend_from_slice(&block_height.to_be_bytes()); - 
bytes.extend_from_slice(index_root.as_bytes()); - bytes.extend_from_slice(&noise.to_be_bytes()); - let h = DoubleSha256::from_data(&bytes[..]); - let mut hb = [0u8; 32]; - hb.copy_from_slice(h.as_bytes()); - - BurnchainHeaderHash(hb) - } - } - - impl BurnchainBlockHeader { - pub fn from_parent_snapshot( - parent_sn: &BlockSnapshot, - block_hash: BurnchainHeaderHash, - num_txs: u64, - ) -> BurnchainBlockHeader { - BurnchainBlockHeader { - block_height: parent_sn.block_height + 1, - block_hash: block_hash, - parent_block_hash: parent_sn.burn_header_hash.clone(), - num_txs: num_txs, - timestamp: get_epoch_time_secs(), - } - } - } - - #[derive(Debug, Clone)] - pub struct TestBurnchainBlock { - pub block_height: u64, - pub parent_snapshot: BlockSnapshot, - pub txs: Vec, - pub fork_id: u64, - pub timestamp: u64, - } - - #[derive(Debug, Clone)] - pub struct TestBurnchainFork { - pub start_height: u64, - pub mined: u64, - pub tip_index_root: TrieHash, - pub tip_header_hash: BurnchainHeaderHash, - pub tip_sortition_id: SortitionId, - pub pending_blocks: Vec, - pub blocks: Vec, - pub fork_id: u64, - } - - pub struct TestBurnchainNode { - pub sortdb: SortitionDB, - pub dirty: bool, - pub burnchain: Burnchain, - } - - #[derive(Debug, Clone)] - pub struct TestMiner { - pub burnchain: Burnchain, - pub privks: Vec, - pub num_sigs: u16, - pub hash_mode: AddressHashMode, - pub microblock_privks: Vec, - pub vrf_keys: Vec, - pub vrf_key_map: HashMap, - pub block_commits: Vec, - pub id: usize, - pub nonce: u64, - pub spent_at_nonce: HashMap, // how much uSTX this miner paid in a given tx's nonce - pub test_with_tx_fees: bool, // set to true to make certain helper methods attach a pre-defined tx fee - } - - pub struct TestMinerFactory { - pub key_seed: [u8; 32], - pub next_miner_id: usize, - } - - impl TestMiner { - pub fn new( - burnchain: &Burnchain, - privks: &Vec, - num_sigs: u16, - hash_mode: &AddressHashMode, - ) -> TestMiner { - TestMiner { - burnchain: burnchain.clone(), - 
privks: privks.clone(), - num_sigs, - hash_mode: hash_mode.clone(), - microblock_privks: vec![], - vrf_keys: vec![], - vrf_key_map: HashMap::new(), - block_commits: vec![], - id: 0, - nonce: 0, - spent_at_nonce: HashMap::new(), - test_with_tx_fees: true, - } - } - - pub fn last_VRF_public_key(&self) -> Option { - match self.vrf_keys.len() { - 0 => None, - x => Some(VRFPublicKey::from_private(&self.vrf_keys[x - 1])), - } - } - - pub fn last_block_commit(&self) -> Option { - match self.block_commits.len() { - 0 => None, - x => Some(self.block_commits[x - 1].clone()), - } - } - - pub fn next_VRF_key(&mut self) -> VRFPrivateKey { - let pk = if self.vrf_keys.len() == 0 { - // first key is simply the 32-byte hash of the secret state - let mut buf: Vec = vec![]; - for i in 0..self.privks.len() { - buf.extend_from_slice(&self.privks[i].to_bytes()[..]); - } - buf.extend_from_slice(&[ - (self.num_sigs >> 8) as u8, - (self.num_sigs & 0xff) as u8, - self.hash_mode as u8, - ]); - let h = Sha256Sum::from_data(&buf[..]); - VRFPrivateKey::from_bytes(h.as_bytes()).unwrap() - } else { - // next key is just the hash of the last - let h = Sha256Sum::from_data(self.vrf_keys[self.vrf_keys.len() - 1].as_bytes()); - VRFPrivateKey::from_bytes(h.as_bytes()).unwrap() - }; - - self.vrf_keys.push(pk.clone()); - self.vrf_key_map - .insert(VRFPublicKey::from_private(&pk), pk.clone()); - pk - } - - pub fn next_microblock_privkey(&mut self) -> StacksPrivateKey { - let pk = if self.microblock_privks.len() == 0 { - // first key is simply the 32-byte hash of the secret state - let mut buf: Vec = vec![]; - for i in 0..self.privks.len() { - buf.extend_from_slice(&self.privks[i].to_bytes()[..]); - } - buf.extend_from_slice(&[ - (self.num_sigs >> 8) as u8, - (self.num_sigs & 0xff) as u8, - self.hash_mode as u8, - ]); - let h = Sha256Sum::from_data(&buf[..]); - StacksPrivateKey::from_slice(h.as_bytes()).unwrap() - } else { - // next key is the hash of the last - let h = Sha256Sum::from_data( - 
&self.microblock_privks[self.microblock_privks.len() - 1].to_bytes(), - ); - StacksPrivateKey::from_slice(h.as_bytes()).unwrap() - }; - - self.microblock_privks.push(pk.clone()); - pk - } - - pub fn make_proof( - &self, - vrf_pubkey: &VRFPublicKey, - last_sortition_hash: &SortitionHash, - ) -> Option { - test_debug!( - "Make proof from {} over {}", - vrf_pubkey.to_hex(), - last_sortition_hash - ); - match self.vrf_key_map.get(vrf_pubkey) { - Some(ref prover_key) => { - let proof = VRF::prove(prover_key, &last_sortition_hash.as_bytes().to_vec()); - let valid = match VRF::verify( - vrf_pubkey, - &proof, - &last_sortition_hash.as_bytes().to_vec(), - ) { - Ok(v) => v, - Err(e) => false, - }; - assert!(valid); - Some(proof) - } - None => None, - } - } - - pub fn as_transaction_auth(&self) -> Option { - match self.hash_mode { - AddressHashMode::SerializeP2PKH => TransactionAuth::from_p2pkh(&self.privks[0]), - AddressHashMode::SerializeP2SH => { - TransactionAuth::from_p2sh(&self.privks, self.num_sigs) - } - AddressHashMode::SerializeP2WPKH => TransactionAuth::from_p2wpkh(&self.privks[0]), - AddressHashMode::SerializeP2WSH => { - TransactionAuth::from_p2wsh(&self.privks, self.num_sigs) - } - } - } - - pub fn origin_address(&self) -> Option { - match self.as_transaction_auth() { - Some(auth) => Some(auth.origin().address_testnet()), - None => None, - } - } - - pub fn get_nonce(&self) -> u64 { - self.nonce - } - - pub fn set_nonce(&mut self, n: u64) -> () { - self.nonce = n; - } - - pub fn sign_as_origin(&mut self, tx_signer: &mut StacksTransactionSigner) -> () { - let num_keys = if self.privks.len() < self.num_sigs as usize { - self.privks.len() - } else { - self.num_sigs as usize - }; - - for i in 0..num_keys { - tx_signer.sign_origin(&self.privks[i]).unwrap(); - } - - self.nonce += 1 - } - - pub fn sign_as_sponsor(&mut self, tx_signer: &mut StacksTransactionSigner) -> () { - let num_keys = if self.privks.len() < self.num_sigs as usize { - self.privks.len() - } else { - 
self.num_sigs as usize - }; - - for i in 0..num_keys { - tx_signer.sign_sponsor(&self.privks[i]).unwrap(); - } - - self.nonce += 1 - } - } - - // creates miners deterministically - impl TestMinerFactory { - pub fn new() -> TestMinerFactory { - TestMinerFactory { - key_seed: [0u8; 32], - next_miner_id: 1, - } - } - - pub fn from_u16(seed: u16) -> TestMinerFactory { - let mut bytes = [0u8; 32]; - (&mut bytes[0..2]).copy_from_slice(&seed.to_be_bytes()); - TestMinerFactory { - key_seed: bytes, - next_miner_id: seed as usize, - } - } - - pub fn next_private_key(&mut self) -> StacksPrivateKey { - let h = Sha256Sum::from_data(&self.key_seed); - self.key_seed.copy_from_slice(h.as_bytes()); - - StacksPrivateKey::from_slice(h.as_bytes()).unwrap() - } - - pub fn next_miner( - &mut self, - burnchain: &Burnchain, - num_keys: u16, - num_sigs: u16, - hash_mode: AddressHashMode, - ) -> TestMiner { - let mut keys = vec![]; - for i in 0..num_keys { - keys.push(self.next_private_key()); - } - - test_debug!("New miner: {:?} {}:{:?}", &hash_mode, num_sigs, &keys); - let mut m = TestMiner::new(burnchain, &keys, num_sigs, &hash_mode); - m.id = self.next_miner_id; - self.next_miner_id += 1; - m - } - } - - impl TestBurnchainBlock { - pub fn new(parent_snapshot: &BlockSnapshot, fork_id: u64) -> TestBurnchainBlock { - TestBurnchainBlock { - parent_snapshot: parent_snapshot.clone(), - block_height: parent_snapshot.block_height + 1, - txs: vec![], - fork_id: fork_id, - timestamp: get_epoch_time_secs(), - } - } - - pub fn add_leader_key_register(&mut self, miner: &mut TestMiner) -> LeaderKeyRegisterOp { - let next_vrf_key = miner.next_VRF_key(); - let mut txop = LeaderKeyRegisterOp::new_from_secrets( - &miner.privks, - miner.num_sigs, - &miner.hash_mode, - &next_vrf_key, - ) - .unwrap(); - - txop.vtxindex = self.txs.len() as u32; - txop.block_height = self.block_height; - txop.burn_header_hash = BurnchainHeaderHash::from_test_data( - txop.block_height, - &self.parent_snapshot.index_root, - 
self.fork_id, - ); - txop.txid = - Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); - txop.consensus_hash = self.parent_snapshot.consensus_hash.clone(); - - self.txs - .push(BlockstackOperationType::LeaderKeyRegister(txop.clone())); - - txop - } - - pub fn add_leader_block_commit( - &mut self, - ic: &SortitionDBConn, - miner: &mut TestMiner, - block_hash: &BlockHeaderHash, - burn_fee: u64, - leader_key: &LeaderKeyRegisterOp, - fork_snapshot: Option<&BlockSnapshot>, - parent_block_snapshot: Option<&BlockSnapshot>, - ) -> LeaderBlockCommitOp { - let input = (Txid([0; 32]), 0); - let pubks = miner - .privks - .iter() - .map(|ref pk| StacksPublicKey::from_private(pk)) - .collect(); - let apparent_sender = BurnchainSigner { - hash_mode: miner.hash_mode.clone(), - num_sigs: miner.num_sigs as usize, - public_keys: pubks, - }; - - let last_snapshot = match fork_snapshot { - Some(sn) => sn.clone(), - None => SortitionDB::get_canonical_burn_chain_tip(ic).unwrap(), - }; - - let last_snapshot_with_sortition = match parent_block_snapshot { - Some(sn) => sn.clone(), - None => SortitionDB::get_first_block_snapshot(ic).unwrap(), - }; - - // prove on the last-ever sortition's hash to produce the new seed - let proof = miner - .make_proof(&leader_key.public_key, &last_snapshot.sortition_hash) - .expect(&format!( - "FATAL: no private key for {}", - leader_key.public_key.to_hex() - )); - - let new_seed = VRFSeed::from_proof(&proof); - - let get_commit_res = SortitionDB::get_block_commit( - ic.conn(), - &last_snapshot_with_sortition.winning_block_txid, - &last_snapshot_with_sortition.sortition_id, - ) - .expect("FATAL: failed to read block commit"); - let mut txop = match get_commit_res { - Some(parent) => { - let txop = LeaderBlockCommitOp::new( - block_hash, - self.block_height, - &new_seed, - &parent, - leader_key.block_height as u32, - leader_key.vtxindex as u16, - burn_fee, - &input, - &apparent_sender, - ); - txop - } - None => { - // initial - 
let txop = LeaderBlockCommitOp::initial( - block_hash, - self.block_height, - &new_seed, - leader_key, - burn_fee, - &input, - &apparent_sender, - ); - txop - } - }; - - txop.set_burn_height(self.block_height); - txop.vtxindex = self.txs.len() as u32; - txop.burn_header_hash = BurnchainHeaderHash::from_test_data( - txop.block_height, - &self.parent_snapshot.index_root, - self.fork_id, - ); // NOTE: override this if you intend to insert into the sortdb! - txop.txid = - Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); - - self.txs - .push(BlockstackOperationType::LeaderBlockCommit(txop.clone())); - - miner.block_commits.push(txop.clone()); - txop - } - - // TODO: user burn support - - pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) -> () { - assert_eq!(parent_snapshot.block_height + 1, self.block_height); - - for i in 0..self.txs.len() { - match self.txs[i] { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - assert_eq!(data.block_height, self.block_height); - data.consensus_hash = parent_snapshot.consensus_hash.clone(); - } - - BlockstackOperationType::UserBurnSupport(ref mut data) => { - assert_eq!(data.block_height, self.block_height); - data.consensus_hash = parent_snapshot.consensus_hash.clone(); - } - _ => {} - } - } - } - - pub fn mine(&self, db: &mut SortitionDB, burnchain: &Burnchain) -> BlockSnapshot { - let block_hash = BurnchainHeaderHash::from_test_data( - self.block_height, - &self.parent_snapshot.index_root, - self.fork_id, - ); - let mock_bitcoin_block = BitcoinBlock::new( - self.block_height, - &block_hash, - &self.parent_snapshot.burn_header_hash, - &vec![], - get_epoch_time_secs(), - ); - let block = BurnchainBlock::Bitcoin(mock_bitcoin_block); - - test_debug!( - "Process block {} {}", - block.block_height(), - &block.block_hash() - ); - - let header = block.header(); - let sort_id = SortitionId::stubbed(&header.parent_block_hash); - let mut sortition_db_handle = 
SortitionHandleTx::begin(db, &sort_id).unwrap(); - - let parent_snapshot = sortition_db_handle - .get_block_snapshot(&header.parent_block_hash, &sort_id) - .unwrap() - .expect("FATAL: failed to get burnchain linkage info"); - - let blockstack_txs = self.txs.clone(); - - let burnchain_db = - BurnchainDB::connect(&burnchain.get_burnchaindb_path(), &burnchain, true).unwrap(); - - let new_snapshot = sortition_db_handle - .process_block_txs( - &parent_snapshot, - &header, - burnchain, - blockstack_txs, - None, - PoxId::stubbed(), - None, - 0, - ) - .unwrap(); - sortition_db_handle.commit().unwrap(); - - new_snapshot.0 - } - - pub fn mine_pox< - 'a, - T: BlockEventDispatcher, - N: CoordinatorNotices, - R: RewardSetProvider, - >( - &self, - db: &mut SortitionDB, - burnchain: &Burnchain, - coord: &mut ChainsCoordinator<'a, T, N, R>, - ) -> BlockSnapshot { - let block_hash = BurnchainHeaderHash::from_test_data( - self.block_height, - &self.parent_snapshot.index_root, - self.fork_id, - ); - let mock_bitcoin_block = BitcoinBlock::new( - self.block_height, - &block_hash, - &self.parent_snapshot.burn_header_hash, - &vec![], - get_epoch_time_secs(), - ); - let block = BurnchainBlock::Bitcoin(mock_bitcoin_block); - - test_debug!( - "Process PoX block {} {}", - block.block_height(), - &block.block_hash() - ); - - let header = block.header(); - let indexer: BitcoinIndexer = burnchain.make_indexer().unwrap(); - - let mut burnchain_db = - BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); - burnchain_db - .raw_store_burnchain_block(burnchain, &indexer, header.clone(), self.txs.clone()) - .unwrap(); - - coord.handle_new_burnchain_block().unwrap(); - - let snapshot = SortitionDB::get_canonical_burn_chain_tip(db.conn()).unwrap(); - snapshot - } - } - - impl TestBurnchainFork { - pub fn new( - start_height: u64, - start_header_hash: &BurnchainHeaderHash, - start_index_root: &TrieHash, - fork_id: u64, - ) -> TestBurnchainFork { - TestBurnchainFork { - start_height, - 
mined: 0, - tip_header_hash: start_header_hash.clone(), - tip_sortition_id: SortitionId([0x00; 32]), - tip_index_root: start_index_root.clone(), - blocks: vec![], - pending_blocks: vec![], - fork_id: fork_id, - } - } - - pub fn fork(&self) -> TestBurnchainFork { - let mut new_fork = (*self).clone(); - new_fork.fork_id += 1; - new_fork - } - - pub fn append_block(&mut self, b: TestBurnchainBlock) -> () { - self.pending_blocks.push(b); - } - - pub fn get_tip(&mut self, ic: &SortitionDBConn) -> BlockSnapshot { - test_debug!( - "Get tip snapshot at {} (sortition ID {})", - &self.tip_header_hash, - &self.tip_sortition_id - ); - SortitionDB::get_block_snapshot(ic, &self.tip_sortition_id) - .unwrap() - .unwrap() - } - - pub fn next_block(&mut self, ic: &SortitionDBConn) -> TestBurnchainBlock { - let fork_tip = self.get_tip(ic); - TestBurnchainBlock::new(&fork_tip, self.fork_id) - } - - pub fn mine_pending_blocks( - &mut self, - db: &mut SortitionDB, - burnchain: &Burnchain, - ) -> BlockSnapshot { - let mut snapshot = { - let ic = db.index_conn(); - self.get_tip(&ic) - }; - - for mut block in self.pending_blocks.drain(..) 
{ - // fill in consensus hash and block hash, which we may not have known at the call - // to next_block (since we can call next_block() many times without mining blocks) - block.patch_from_chain_tip(&snapshot); - - snapshot = block.mine(db, burnchain); - - self.blocks.push(block); - self.mined += 1; - self.tip_index_root = snapshot.index_root; - self.tip_header_hash = snapshot.burn_header_hash; - self.tip_sortition_id = snapshot.sortition_id; - } - - // give back the new chain tip - snapshot - } - - pub fn mine_pending_blocks_pox< - 'a, - T: BlockEventDispatcher, - N: CoordinatorNotices, - R: RewardSetProvider, - >( - &mut self, - db: &mut SortitionDB, - burnchain: &Burnchain, - coord: &mut ChainsCoordinator<'a, T, N, R>, - ) -> BlockSnapshot { - let mut snapshot = { - let ic = db.index_conn(); - self.get_tip(&ic) - }; - - for mut block in self.pending_blocks.drain(..) { - // fill in consensus hash and block hash, which we may not have known at the call - // to next_block (since we can call next_block() many times without mining blocks) - block.patch_from_chain_tip(&snapshot); - - snapshot = block.mine_pox(db, burnchain, coord); - - self.blocks.push(block); - self.mined += 1; - self.tip_index_root = snapshot.index_root; - self.tip_header_hash = snapshot.burn_header_hash; - self.tip_sortition_id = snapshot.sortition_id; - } - - // give back the new chain tip - snapshot - } - } - - impl TestBurnchainNode { - pub fn new() -> TestBurnchainNode { - let first_block_height = 100; - let first_block_hash = BurnchainHeaderHash([0u8; 32]); - let db = SortitionDB::connect_test(first_block_height, &first_block_hash).unwrap(); - TestBurnchainNode { - sortdb: db, - dirty: false, - burnchain: Burnchain::default_unittest(first_block_height, &first_block_hash), - } - } - - pub fn mine_fork(&mut self, fork: &mut TestBurnchainFork) -> BlockSnapshot { - fork.mine_pending_blocks(&mut self.sortdb, &self.burnchain) - } - } - - fn process_next_sortition( - node: &mut TestBurnchainNode, - 
fork: &mut TestBurnchainFork, - miners: &mut Vec, - prev_keys: &Vec, - block_hashes: &Vec, - ) -> ( - BlockSnapshot, - Vec, - Vec, - Vec, - ) { - assert_eq!(miners.len(), block_hashes.len()); - - let mut block = { - let ic = node.sortdb.index_conn(); - fork.next_block(&ic) - }; - - let mut next_commits = vec![]; - let mut next_prev_keys = vec![]; - - if prev_keys.len() > 0 { - assert_eq!(miners.len(), prev_keys.len()); - - // make a Stacks block (hash) for each of the prior block's keys - for j in 0..miners.len() { - let block_commit_op = { - let ic = node.sortdb.index_conn(); - let hash = block_hashes[j].clone(); - block.add_leader_block_commit( - &ic, - &mut miners[j], - &hash, - ((j + 1) as u64) * 1000, - &prev_keys[j], - None, - None, - ) - }; - next_commits.push(block_commit_op); - } - } - - // have each leader register a VRF key - for j in 0..miners.len() { - let key_register_op = block.add_leader_key_register(&mut miners[j]); - next_prev_keys.push(key_register_op); - } - - test_debug!("Mine {} transactions", block.txs.len()); - - fork.append_block(block); - let tip_snapshot = node.mine_fork(fork); - - // TODO: user burn support - (tip_snapshot, next_prev_keys, next_commits, vec![]) - } - - fn verify_keys_accepted( - node: &mut TestBurnchainNode, - prev_keys: &Vec, - ) -> () { - // all keys accepted - for key in prev_keys.iter() { - let tx_opt = - SortitionDB::get_burnchain_transaction(node.sortdb.conn(), &key.txid).unwrap(); - assert!(tx_opt.is_some()); - - let tx = tx_opt.unwrap(); - match tx { - BlockstackOperationType::LeaderKeyRegister(ref op) => { - assert_eq!(*op, *key); - } - _ => { - assert!(false); - } - } - } - } - - fn verify_commits_accepted( - node: &TestBurnchainNode, - next_block_commits: &Vec, - ) -> () { - // all commits accepted - for commit in next_block_commits.iter() { - let tx_opt = - SortitionDB::get_burnchain_transaction(node.sortdb.conn(), &commit.txid).unwrap(); - assert!(tx_opt.is_some()); - - let tx = tx_opt.unwrap(); - match tx { 
- BlockstackOperationType::LeaderBlockCommit(ref op) => { - assert_eq!(*op, *commit); - } - _ => { - assert!(false); - } - } - } - } - - #[test] - fn mine_10_stacks_blocks_1_fork() { - let mut node = TestBurnchainNode::new(); - let mut miner_factory = TestMinerFactory::new(); - - let mut miners = vec![]; - for i in 0..10 { - miners.push(miner_factory.next_miner( - &node.burnchain, - 1, - 1, - AddressHashMode::SerializeP2PKH, - )); - } - - let first_snapshot = SortitionDB::get_first_block_snapshot(node.sortdb.conn()).unwrap(); - let mut fork = TestBurnchainFork::new( - first_snapshot.block_height, - &first_snapshot.burn_header_hash, - &first_snapshot.index_root, - 0, - ); - let mut prev_keys = vec![]; - - for i in 0..10 { - let mut next_block_hashes = vec![]; - for j in 0..miners.len() { - let hash = BlockHeaderHash([(i * 10 + j + miners.len()) as u8; 32]); - next_block_hashes.push(hash); - } - - let (next_snapshot, mut next_prev_keys, next_block_commits, next_user_burns) = - process_next_sortition( - &mut node, - &mut fork, - &mut miners, - &prev_keys, - &next_block_hashes, - ); - - verify_keys_accepted(&mut node, &prev_keys); - verify_commits_accepted(&mut node, &next_block_commits); - - prev_keys.clear(); - prev_keys.append(&mut next_prev_keys); - } - } - - #[test] - fn mine_10_stacks_blocks_2_forks_disjoint() { - let mut node = TestBurnchainNode::new(); - let mut miner_factory = TestMinerFactory::new(); - - let mut miners = vec![]; - for i in 0..10 { - miners.push(miner_factory.next_miner( - &node.burnchain, - 1, - 1, - AddressHashMode::SerializeP2PKH, - )); - } - - let first_snapshot = SortitionDB::get_first_block_snapshot(node.sortdb.conn()).unwrap(); - let mut fork_1 = TestBurnchainFork::new( - first_snapshot.block_height, - &first_snapshot.burn_header_hash, - &first_snapshot.index_root, - 0, - ); - let mut prev_keys_1 = vec![]; - - // one fork for 5 blocks... 
- for i in 0..5 { - let mut next_block_hashes = vec![]; - for j in 0..miners.len() { - let hash = BlockHeaderHash([(i * 10 + j + miners.len()) as u8; 32]); - next_block_hashes.push(hash); - } - - let (next_snapshot, mut next_prev_keys, next_block_commits, next_user_burns) = - process_next_sortition( - &mut node, - &mut fork_1, - &mut miners, - &prev_keys_1, - &next_block_hashes, - ); - - verify_keys_accepted(&mut node, &prev_keys_1); - verify_commits_accepted(&mut node, &next_block_commits); - - prev_keys_1.clear(); - prev_keys_1.append(&mut next_prev_keys); - } - - let mut fork_2 = fork_1.fork(); - let mut prev_keys_2 = prev_keys_1[5..].to_vec(); - prev_keys_1.truncate(5); - - let mut miners_1 = vec![]; - let mut miners_2 = vec![]; - - let mut miners_drain = miners.drain(..); - for i in 0..5 { - let m = miners_drain.next().unwrap(); - miners_1.push(m); - } - for i in 0..5 { - let m = miners_drain.next().unwrap(); - miners_2.push(m); - } - - // two disjoint forks for 5 blocks... - for i in 5..10 { - let mut next_block_hashes_1 = vec![]; - for j in 0..miners_1.len() { - let hash = BlockHeaderHash( - [(i * (miners_1.len() + miners_2.len()) + j + miners_1.len() + miners_2.len()) - as u8; 32], - ); - next_block_hashes_1.push(hash); - } - - let mut next_block_hashes_2 = vec![]; - for j in 0..miners_2.len() { - let hash = BlockHeaderHash( - [(i * (miners_1.len() + miners_2.len()) - + (5 + j) - + miners_1.len() - + miners_2.len()) as u8; 32], - ); - next_block_hashes_2.push(hash); - } - - let (next_snapshot_1, mut next_prev_keys_1, next_block_commits_1, next_user_burns_1) = - process_next_sortition( - &mut node, - &mut fork_1, - &mut miners_1, - &prev_keys_1, - &next_block_hashes_1, - ); - let (next_snapshot_2, mut next_prev_keys_2, next_block_commits_2, next_user_burns_2) = - process_next_sortition( - &mut node, - &mut fork_2, - &mut miners_2, - &prev_keys_2, - &next_block_hashes_2, - ); - - assert!(next_snapshot_1.burn_header_hash != next_snapshot_2.burn_header_hash); - 
- verify_keys_accepted(&mut node, &prev_keys_1); - verify_commits_accepted(&mut node, &next_block_commits_1); - - verify_keys_accepted(&mut node, &prev_keys_2); - verify_commits_accepted(&mut node, &next_block_commits_2); - - prev_keys_1.clear(); - prev_keys_1.append(&mut next_prev_keys_1); - - prev_keys_2.clear(); - prev_keys_2.append(&mut next_prev_keys_2); - } - } - - #[test] - fn mine_10_stacks_blocks_2_forks_disjoint_same_blocks() { - let mut node = TestBurnchainNode::new(); - let mut miner_factory = TestMinerFactory::new(); - - let mut miners = vec![]; - for i in 0..10 { - miners.push(miner_factory.next_miner( - &node.burnchain, - 1, - 1, - AddressHashMode::SerializeP2PKH, - )); - } - - let first_snapshot = SortitionDB::get_first_block_snapshot(node.sortdb.conn()).unwrap(); - let mut fork_1 = TestBurnchainFork::new( - first_snapshot.block_height, - &first_snapshot.burn_header_hash, - &first_snapshot.index_root, - 0, - ); - let mut prev_keys_1 = vec![]; - - // one fork for 5 blocks... 
- for i in 0..5 { - let mut next_block_hashes = vec![]; - for j in 0..miners.len() { - let hash = BlockHeaderHash([(i * 10 + j + miners.len()) as u8; 32]); - next_block_hashes.push(hash); - } - - let (snapshot, mut next_prev_keys, next_block_commits, next_user_burns) = - process_next_sortition( - &mut node, - &mut fork_1, - &mut miners, - &prev_keys_1, - &next_block_hashes, - ); - - verify_keys_accepted(&mut node, &prev_keys_1); - verify_commits_accepted(&mut node, &next_block_commits); - - prev_keys_1.clear(); - prev_keys_1.append(&mut next_prev_keys); - } - - let mut fork_2 = fork_1.fork(); - let mut prev_keys_2 = prev_keys_1[5..].to_vec(); - prev_keys_1.truncate(5); - - let mut miners_1 = vec![]; - let mut miners_2 = vec![]; - - let mut miners_drain = miners.drain(..); - for i in 0..5 { - let m = miners_drain.next().unwrap(); - miners_1.push(m); - } - for i in 0..5 { - let m = miners_drain.next().unwrap(); - miners_2.push(m); - } - - // two disjoint forks for 5 blocks, but miners in each fork mine the same blocks. - // This tests that we can accept two burnchain forks that each contain the same stacks - // block history. 
- for i in 5..10 { - let mut next_block_hashes_1 = vec![]; - for j in 0..miners_1.len() { - let hash = BlockHeaderHash( - [(i * (miners_1.len() + miners_2.len()) + j + miners_1.len() + miners_2.len()) - as u8; 32], - ); - next_block_hashes_1.push(hash); - } - - let mut next_block_hashes_2 = vec![]; - for j in 0..miners_2.len() { - let hash = BlockHeaderHash( - [(i * (miners_1.len() + miners_2.len()) + j + miners_1.len() + miners_2.len()) - as u8; 32], - ); - next_block_hashes_2.push(hash); - } - - let (snapshot_1, mut next_prev_keys_1, next_block_commits_1, next_user_burns_1) = - process_next_sortition( - &mut node, - &mut fork_1, - &mut miners_1, - &prev_keys_1, - &next_block_hashes_1, - ); - let (snapshot_2, mut next_prev_keys_2, next_block_commits_2, next_user_burns_2) = - process_next_sortition( - &mut node, - &mut fork_2, - &mut miners_2, - &prev_keys_2, - &next_block_hashes_2, - ); - - assert!(snapshot_1.burn_header_hash != snapshot_2.burn_header_hash); - assert!(snapshot_1.consensus_hash != snapshot_2.consensus_hash); - - // same blocks mined in both forks - assert_eq!(next_block_commits_1.len(), next_block_commits_2.len()); - for i in 0..next_block_commits_1.len() { - assert_eq!( - next_block_commits_1[i].block_header_hash, - next_block_commits_2[i].block_header_hash - ); - } - - verify_keys_accepted(&mut node, &prev_keys_1); - verify_commits_accepted(&mut node, &next_block_commits_1); - - verify_keys_accepted(&mut node, &prev_keys_2); - verify_commits_accepted(&mut node, &next_block_commits_2); - - prev_keys_1.clear(); - prev_keys_1.append(&mut next_prev_keys_1); - - prev_keys_2.clear(); - prev_keys_2.append(&mut next_prev_keys_2); - } - } -} diff --git a/src/burnchains/tests/affirmation.rs b/src/burnchains/tests/affirmation.rs new file mode 100644 index 0000000000..e7a2d7ed66 --- /dev/null +++ b/src/burnchains/tests/affirmation.rs @@ -0,0 +1,2387 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks 
Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::cmp; +use std::collections::HashSet; +use std::collections::VecDeque; +use std::sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + mpsc::sync_channel, + Arc, RwLock, +}; + +use rusqlite::Connection; + +use address; +use burnchains::affirmation::*; +use burnchains::bitcoin::address::BitcoinAddress; +use burnchains::bitcoin::indexer::BitcoinIndexer; +use burnchains::bitcoin::BitcoinNetworkType; +use burnchains::tests::db::*; +use burnchains::{db::*, *}; +use burnchains::{BurnchainBlock, BurnchainBlockHeader, Txid}; +use chainstate; +use chainstate::burn::db::sortdb::SortitionDB; +use chainstate::burn::operations::leader_block_commit::*; +use chainstate::burn::operations::*; +use chainstate::burn::*; +use chainstate::coordinator::{Error as CoordError, *}; +use chainstate::stacks::*; +use clarity_vm::clarity::ClarityConnection; +use core; +use core::*; +use monitoring::increment_stx_blocks_processed_counter; +use util::hash::{hex_bytes, Hash160}; +use util::vrf::*; +use vm::{ + costs::{ExecutionCost, LimitedCostTracker}, + types::PrincipalData, + types::QualifiedContractIdentifier, + Value, +}; + +use crate::types::chainstate::StacksBlockId; +use crate::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, VRFSeed, +}; +use crate::types::proof::TrieHash; +use 
crate::{types, util}; + +use chainstate::coordinator::tests::*; + +#[test] +fn affirmation_map_encode_decode() { + assert_eq!(AffirmationMap::decode(""), Some(AffirmationMap::empty())); + assert_eq!( + AffirmationMap::decode("anp"), + Some(AffirmationMap::new(vec![ + AffirmationMapEntry::PoxAnchorBlockAbsent, + AffirmationMapEntry::Nothing, + AffirmationMapEntry::PoxAnchorBlockPresent + ])) + ); + assert_eq!(AffirmationMap::decode("x"), None); + + assert_eq!(AffirmationMap::empty().encode(), "".to_string()); + assert_eq!( + AffirmationMap::new(vec![ + AffirmationMapEntry::PoxAnchorBlockAbsent, + AffirmationMapEntry::Nothing, + AffirmationMapEntry::PoxAnchorBlockPresent + ]) + .encode(), + "anp".to_string() + ); +} + +#[test] +fn affirmation_map_find_divergence() { + assert_eq!( + AffirmationMap::decode("aaa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aaa").unwrap()), + None + ); + assert_eq!( + AffirmationMap::decode("aaa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aaaa").unwrap()), + Some(3) + ); + assert_eq!( + AffirmationMap::decode("aaa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aa").unwrap()), + None + ); + assert_eq!( + AffirmationMap::decode("apa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aaa").unwrap()), + Some(1) + ); + assert_eq!( + AffirmationMap::decode("apa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aaaa").unwrap()), + Some(1) + ); + assert_eq!( + AffirmationMap::decode("naa") + .unwrap() + .find_divergence(&AffirmationMap::decode("aa").unwrap()), + Some(0) + ); + assert_eq!( + AffirmationMap::decode("napn") + .unwrap() + .find_divergence(&AffirmationMap::decode("").unwrap()), + None + ); + assert_eq!( + AffirmationMap::decode("pn") + .unwrap() + .find_divergence(&AffirmationMap::decode("n").unwrap()), + Some(0) + ); +} + +fn make_simple_key_register( + burn_header_hash: &BurnchainHeaderHash, + block_height: u64, + vtxindex: u32, +) -> LeaderKeyRegisterOp { + LeaderKeyRegisterOp { 
+ consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), + ) + .unwrap(), + memo: vec![01, 02, 03, 04, 05], + address: StacksAddress::from_bitcoin_address( + &BitcoinAddress::from_scriptpubkey( + BitcoinNetworkType::Testnet, + &hex_bytes("76a9140be3e286a15ea85882761618e366586b5574100d88ac").unwrap(), + ) + .unwrap(), + ), + + txid: next_txid(), + vtxindex: vtxindex, + block_height: block_height, + burn_header_hash: burn_header_hash.clone(), + } +} + +pub fn make_reward_cycle_with_vote( + burnchain_db: &mut BurnchainDB, + burnchain: &Burnchain, + key: &LeaderKeyRegisterOp, + headers: &mut Vec, + mut parent_commits: Vec>, + confirm_anchor_block: bool, +) -> ( + Vec, + Vec>>, +) { + let mut new_headers = vec![]; + let mut new_commits = vec![]; + + let first_block_header = burnchain_db.get_first_header().unwrap(); + let mut current_header = burnchain_db.get_canonical_chain_tip().unwrap(); + let mut height = current_header.block_height + 1; + let mut parent_block_header: Option = + Some(headers.last().unwrap().to_owned()); + + for i in 0..burnchain.pox_constants.reward_cycle_length { + let block_header = BurnchainBlockHeader { + block_height: height, + block_hash: next_burn_header_hash(), + parent_block_hash: parent_block_header + .as_ref() + .map(|blk| blk.block_hash.clone()) + .unwrap_or(first_block_header.block_hash.clone()), + num_txs: parent_commits.len() as u64, + timestamp: i as u64, + }; + + let ops = if current_header == first_block_header { + // first-ever block -- add only the leader key + let mut key_insert = key.clone(); + key_insert.burn_header_hash = block_header.block_hash.clone(); + + test_debug!( + "Insert key-register in {}: {},{},{} in block {}", + &key_insert.burn_header_hash, + &key_insert.txid, + key_insert.block_height, + key_insert.vtxindex, + 
block_header.block_height + ); + + new_commits.push(vec![None; parent_commits.len()]); + vec![BlockstackOperationType::LeaderKeyRegister( + key_insert.clone(), + )] + } else { + let mut commits = vec![]; + for i in 0..parent_commits.len() { + let mut block_commit = make_simple_block_commit( + &burnchain, + parent_commits[i].as_ref(), + &block_header, + next_block_hash(), + ); + block_commit.key_block_ptr = key.block_height as u32; + block_commit.key_vtxindex = key.vtxindex as u16; + block_commit.vtxindex += i as u32; + block_commit.burn_parent_modulus = if height > 0 { + ((height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8 + } else { + BURN_BLOCK_MINED_AT_MODULUS as u8 - 1 + }; + + assert_eq!(block_commit.burn_header_hash, block_header.block_hash); + assert_eq!(block_commit.block_height, block_header.block_height); + + let append = if !burnchain.is_in_prepare_phase(block_commit.block_height) { + // non-prepare-phase commits always confirm their parent + true + } else { + if confirm_anchor_block { + // all block-commits confirm anchor block + true + } else { + // fewer than anchor_threshold commits confirm anchor block + let next_rc_start = burnchain.reward_cycle_to_block_height( + burnchain + .block_height_to_reward_cycle(block_commit.block_height) + .unwrap() + + 1, + ); + if block_commit.block_height + + (burnchain.pox_constants.anchor_threshold as u64) + + 1 + < next_rc_start + { + // in first half of prepare phase, so confirm + true + } else { + // in second half of prepare phase, so don't confirm + false + } + } + }; + + if append { + test_debug!( + "Insert block-commit in {}: {},{},{}, builds on {},{}", + &block_commit.burn_header_hash, + &block_commit.txid, + block_commit.block_height, + block_commit.vtxindex, + block_commit.parent_block_ptr, + block_commit.parent_vtxindex + ); + + if let Some(ref parent_commit) = parent_commits[i].as_ref() { + assert!( + parent_commit.block_height as u64 != block_commit.block_height as u64 + ); + assert!( + 
parent_commit.block_height as u64 + == block_commit.parent_block_ptr as u64 + ); + assert!( + parent_commit.vtxindex as u64 == block_commit.parent_vtxindex as u64 + ); + } + + parent_commits[i] = Some(block_commit.clone()); + commits.push(Some(block_commit.clone())); + } else { + test_debug!( + "Do NOT insert block-commit in {}: {},{},{}", + &block_commit.burn_header_hash, + &block_commit.txid, + block_commit.block_height, + block_commit.vtxindex + ); + + commits.push(None); + } + } + new_commits.push(commits.clone()); + commits + .into_iter() + .filter_map(|cmt| cmt) + .map(|cmt| BlockstackOperationType::LeaderBlockCommit(cmt)) + .collect() + }; + + burnchain_db + .store_new_burnchain_block_ops_unchecked(burnchain, headers, &block_header, &ops) + .unwrap(); + + headers.push(block_header.clone()); + new_headers.push(block_header.clone()); + parent_block_header = Some(block_header); + + current_header = burnchain_db.get_canonical_chain_tip().unwrap(); + height = current_header.block_height + 1; + } + + (new_headers, new_commits) +} + +fn make_simple_reward_cycle( + burnchain_db: &mut BurnchainDB, + burnchain: &Burnchain, + key: &LeaderKeyRegisterOp, + headers: &mut Vec, + parent_commit: Option, +) -> (Vec, Vec>) { + let (new_headers, commits) = + make_reward_cycle(burnchain_db, burnchain, key, headers, vec![parent_commit]); + ( + new_headers, + commits + .into_iter() + .map(|mut cmts| cmts.pop().unwrap()) + .collect(), + ) +} + +pub fn make_reward_cycle( + burnchain_db: &mut BurnchainDB, + burnchain: &Burnchain, + key: &LeaderKeyRegisterOp, + headers: &mut Vec, + parent_commits: Vec>, +) -> ( + Vec, + Vec>>, +) { + make_reward_cycle_with_vote(burnchain_db, burnchain, key, headers, parent_commits, true) +} + +pub fn make_reward_cycle_without_anchor( + burnchain_db: &mut BurnchainDB, + burnchain: &Burnchain, + key: &LeaderKeyRegisterOp, + headers: &mut Vec, + parent_commits: Vec>, +) -> ( + Vec, + Vec>>, +) { + make_reward_cycle_with_vote(burnchain_db, burnchain, key, 
headers, parent_commits, false) +} + +#[test] +fn test_read_prepare_phase_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + assert_eq!(&first_block_header.block_hash, &first_bhh); + assert_eq!(first_block_header.block_height, first_height); + assert_eq!(first_block_header.timestamp, first_timestamp as u64); + /* + assert_eq!( + &first_block_header.parent_block_hash, + &BurnchainHeaderHash::sentinel() + ); + */ + eprintln!( + "First block parent is {}", + &first_block_header.parent_block_hash + ); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + assert_eq!( + commits.len() as u32, + burnchain.pox_constants.reward_cycle_length + ); + assert!(commits[0].is_none()); + for i in 1..burnchain.pox_constants.reward_cycle_length { + assert!(commits[i as usize].is_some()); + } + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + assert_eq!(all_ops.len() as u32, burnchain.pox_constants.prepare_length); + for i in 0..burnchain.pox_constants.prepare_length { + assert_eq!(all_ops[i as usize].len(), 1); + + let opdata = &all_ops[i as usize][0]; + assert_eq!( + opdata, + commits[(i + burnchain.pox_constants.reward_cycle_length 
+ - burnchain.pox_constants.prepare_length) as usize] + .as_ref() + .unwrap() + ); + } +} + +#[test] +fn test_parent_block_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops).unwrap(); + + // this is a simple reward cycle -- each block-commit has a unique parent + assert_eq!(parent_commits.len(), all_ops.len()); + + for op_list in all_ops.iter() { + for opdata in op_list.iter() { + let mut found_parent = false; + for parent_commit in parent_commits.iter() { + if parent_commit.block_height == (opdata.parent_block_ptr as u64) + && parent_commit.vtxindex == (opdata.parent_vtxindex as u32) + { + found_parent = true; + break; + } + } + assert!(found_parent, "did not find parent for {:?}", opdata); + } + } + + let mut all_ops_with_orphan = all_ops.clone(); + all_ops_with_orphan[1][0].parent_vtxindex += 1; + + let parent_commits = 
read_parent_block_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_with_orphan, + ) + .unwrap(); + + // this is a simple reward cycle -- each block-commit has a unique parent, except for the + // orphan + assert_eq!(parent_commits.len(), all_ops_with_orphan.len() - 1); + + let mut all_ops_with_same_parent = all_ops.clone(); + for ops in all_ops_with_same_parent.iter_mut() { + for opdata in ops.iter_mut() { + opdata.parent_block_ptr = 3; + opdata.parent_vtxindex = 0; + } + } + + let parent_commits = read_parent_block_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_with_same_parent, + ) + .unwrap(); + + assert_eq!(parent_commits.len(), 1); + assert_eq!(parent_commits[0].block_height, 3); + assert_eq!(parent_commits[0].vtxindex, 0); +} + +#[test] +fn test_filter_orphan_block_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + 
read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops).unwrap(); + + let mut all_ops_with_orphan = all_ops.clone(); + all_ops_with_orphan[1][0].parent_vtxindex += 1; + + assert_eq!(all_ops_with_orphan[0].len(), 1); + assert_eq!(all_ops_with_orphan[1].len(), 1); + assert_eq!(all_ops_with_orphan[2].len(), 1); + + let parent_commits = read_parent_block_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_with_orphan, + ) + .unwrap(); + let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops_with_orphan); + + assert_eq!(filtered_ops.len(), all_ops.len()); + assert_eq!(filtered_ops[0].len(), 1); + assert_eq!(filtered_ops[1].len(), 0); + assert_eq!(filtered_ops[2].len(), 1); +} + +#[test] +fn test_filter_missed_block_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops).unwrap(); + + 
let mut all_ops_with_missed = all_ops.clone(); + all_ops_with_missed[1][0].burn_parent_modulus -= 1; + + assert_eq!(all_ops_with_missed[0].len(), 1); + assert_eq!(all_ops_with_missed[1].len(), 1); + assert_eq!(all_ops_with_missed[2].len(), 1); + + let parent_commits = read_parent_block_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_with_missed, + ) + .unwrap(); + let filtered_ops = filter_missed_block_commits(all_ops_with_missed); + + assert_eq!(filtered_ops.len(), all_ops.len()); + assert_eq!(filtered_ops[0].len(), 1); + assert_eq!(filtered_ops[1].len(), 0); + assert_eq!(filtered_ops[2].len(), 1); +} + +#[test] +fn test_find_heaviest_block_commit() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits) = make_simple_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + None, + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops).unwrap(); + let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops); + + let heaviest_parent_commit_opt = 
find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &filtered_ops, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = + heaviest_parent_commit_opt.unwrap(); + + // since this is just a linear chain of block-commits, the heaviest parent is the parent of the + // first block-commit in the prepare phase + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); + assert_eq!(total_confs, 3); + assert_eq!(total_burns, 3 * 10000); + + // make a forked history, but with a best-tip + // 1,0 <-- 2,0 <-- 3,0 <-- 4,0 + // \ + // `---------------------------- 5,0 + let mut all_ops_forked_majority = filtered_ops.clone(); + all_ops_forked_majority[2][0].parent_block_ptr = 1; + all_ops_forked_majority[2][0].parent_vtxindex = 0; + + // still commit 1 + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_forked_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = + heaviest_parent_commit_opt.unwrap(); + + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![false]]); + assert_eq!(total_confs, 2); + assert_eq!(total_burns, 2 * 10000); + + // make a forked history, with another best-tip winner, but with a deeper fork split + // 1,0 <-- 2,0 <-- 3,0 + // \ + // `------- 4,0 <-- 5,0 + let mut all_ops_forked_majority = filtered_ops.clone(); + all_ops_forked_majority[1][0].parent_block_ptr = 2; + all_ops_forked_majority[1][0].parent_vtxindex = 0; + + all_ops_forked_majority[2][0].parent_block_ptr = 2; + all_ops_forked_majority[2][0].parent_vtxindex = 0; + 
+ // still commit 1 + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_forked_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = + heaviest_parent_commit_opt.unwrap(); + + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); + assert_eq!(total_confs, 3); + assert_eq!(total_burns, 3 * 10000); + + // make a forked history where there is no best tip, but enough confirmations + // 1,0 <-- 2,0 <-- 3,0 + // |\ + // | `------- 4,0 + // \ + // `------------- 5,0 + let mut all_ops_no_majority = filtered_ops.clone(); + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 0; + + all_ops_no_majority[1][0].parent_block_ptr = 2; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 1; + + all_ops_no_majority[2][0].parent_block_ptr = 2; + all_ops_no_majority[2][0].parent_vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 2; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = + heaviest_parent_commit_opt.unwrap(); + + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![true]]); + assert_eq!(total_confs, 3); + assert_eq!(total_burns, 1 + 2); + + // make a forked history where there is no best tip, but enough (majority) confirmations + // 1,0 <-- 2,0 <-- 3,0 + // | \ + // | `-------- 4,0 + // | + // 
`----------------------- 5,0 + let mut all_ops_no_majority = filtered_ops.clone(); + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 0; + + all_ops_no_majority[1][0].parent_block_ptr = 2; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 1; + + all_ops_no_majority[2][0].parent_block_ptr = 1; + all_ops_no_majority[2][0].parent_vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 20; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = + heaviest_parent_commit_opt.unwrap(); + + assert_eq!(commits[1].as_ref().unwrap(), &heaviest_parent_block_commit); + assert_eq!(descendancy, vec![vec![true], vec![true], vec![false]]); + assert_eq!(total_confs, 2); + assert_eq!(total_burns, 1); + + // make a history where there is no anchor block, period + // 1,0 <-- 2,0 X-- 3,0 + // + // X------- 4,0 + // + // X------------ 5,0 + let mut all_ops_no_majority = filtered_ops.clone(); + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 10; + all_ops_no_majority[0][0].burn_fee = 0; + + all_ops_no_majority[1][0].parent_block_ptr = 2; + all_ops_no_majority[1][0].parent_vtxindex = 10; + all_ops_no_majority[1][0].burn_fee = 1; + + all_ops_no_majority[2][0].parent_block_ptr = 1; + all_ops_no_majority[2][0].parent_vtxindex = 10; + all_ops_no_majority[2][0].burn_fee = 20; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_none()); +} + +#[test] +fn 
test_find_heaviest_parent_commit_many_commits() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + let (next_headers, commits) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None, None], + ); + + let all_ops = read_prepare_phase_commits( + &burnchain_db.tx_begin().unwrap(), + &headers, + &burnchain.pox_constants, + first_block_header.block_height, + 0, + ) + .unwrap(); + let parent_commits = + read_parent_block_commits(&burnchain_db.tx_begin().unwrap(), &headers, &all_ops).unwrap(); + let filtered_ops = filter_orphan_block_commits(&parent_commits, all_ops); + + // make a history with two miners' commits. 
+ // sortition winners in prepare phase were 3,0; 4,1; 5,0 + // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 ,--- 5,0 + // \ \ / + // `---- 3,1 `--- 4,1 <--- 5,1 + let mut all_ops_no_majority = filtered_ops.clone(); + + // 3,0 + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 1; + + // 3,1 + all_ops_no_majority[0][1].parent_block_ptr = 2; + all_ops_no_majority[0][1].parent_vtxindex = 0; + all_ops_no_majority[0][1].vtxindex = 1; + all_ops_no_majority[0][1].burn_fee = 1; + + // 4,0 + all_ops_no_majority[1][0].parent_block_ptr = 3; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 2; + + // 4,1 + all_ops_no_majority[1][1].parent_block_ptr = 3; + all_ops_no_majority[1][1].parent_vtxindex = 0; + all_ops_no_majority[1][1].vtxindex = 1; + all_ops_no_majority[1][1].burn_fee = 2; + + // 5,0 + all_ops_no_majority[2][0].parent_block_ptr = 4; + all_ops_no_majority[2][0].parent_vtxindex = 1; + all_ops_no_majority[2][0].vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 3; + + // 5,1 + all_ops_no_majority[2][1].parent_block_ptr = 4; + all_ops_no_majority[2][1].parent_vtxindex = 1; + all_ops_no_majority[2][1].vtxindex = 1; + all_ops_no_majority[2][1].burn_fee = 3; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = + heaviest_parent_commit_opt.unwrap(); + + assert_eq!( + commits[1][0].as_ref().unwrap(), + &heaviest_parent_block_commit + ); + assert_eq!( + descendancy, + vec![vec![true, true], vec![true, true], vec![true, true]] + ); + assert_eq!(total_confs, 3); + assert_eq!(total_burns, 1 + 1 + 2 + 2 + 3 + 3); + + // 
make a history with two miners' commits, with some invalid commits. + // The heavier commit descendancy wins -- 2,1 is the anchor block. + // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 <--- 5,0 (winner) + // \ + // `---- 2,1 <--- 3,1 <--- 4,1 <--- 5,1 + let mut all_ops_no_majority = filtered_ops.clone(); + + // 3,0 + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 1; + + // 3,1 + all_ops_no_majority[0][1].parent_block_ptr = 2; + all_ops_no_majority[0][1].parent_vtxindex = 1; + all_ops_no_majority[0][1].vtxindex = 1; + all_ops_no_majority[0][1].burn_fee = 1; + + // 4,0 + all_ops_no_majority[1][0].parent_block_ptr = 3; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 2; + + // 4,1 + all_ops_no_majority[1][1].parent_block_ptr = 3; + all_ops_no_majority[1][1].parent_vtxindex = 1; + all_ops_no_majority[1][1].vtxindex = 1; + all_ops_no_majority[1][1].burn_fee = 2; + + // 5,0 + all_ops_no_majority[2][0].parent_block_ptr = 4; + all_ops_no_majority[2][0].parent_vtxindex = 0; + all_ops_no_majority[2][0].vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 4; + + // 5,1 + all_ops_no_majority[2][1].parent_block_ptr = 4; + all_ops_no_majority[2][1].parent_vtxindex = 1; + all_ops_no_majority[2][1].vtxindex = 1; + all_ops_no_majority[2][1].burn_fee = 3; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = + heaviest_parent_commit_opt.unwrap(); + + // best option wins + assert_eq!( + commits[1][0].as_ref().unwrap(), + &heaviest_parent_block_commit + ); + assert_eq!( + descendancy, + vec![vec![true, false], vec![true, 
false], vec![true, false]] + ); + assert_eq!(total_confs, 3); + assert_eq!(total_burns, 1 + 2 + 4); + + // make a history with two miners' commits, with some invalid commits. + // commit descendancy weight is a tie, so highest commit is the anchor block (2,1) + // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 <--- 5,0 + // \ + // `---- 2,1 <--- 3,1 <--- 4,1 <--- 5,1 (winner) + let mut all_ops_no_majority = filtered_ops.clone(); + + // 3,0 + all_ops_no_majority[0][0].parent_block_ptr = 2; + all_ops_no_majority[0][0].parent_vtxindex = 0; + all_ops_no_majority[0][0].vtxindex = 0; + all_ops_no_majority[0][0].burn_fee = 1; + + // 3,1 + all_ops_no_majority[0][1].parent_block_ptr = 2; + all_ops_no_majority[0][1].parent_vtxindex = 1; + all_ops_no_majority[0][1].vtxindex = 1; + all_ops_no_majority[0][1].burn_fee = 1; + + // 4,0 + all_ops_no_majority[1][0].parent_block_ptr = 3; + all_ops_no_majority[1][0].parent_vtxindex = 0; + all_ops_no_majority[1][0].vtxindex = 0; + all_ops_no_majority[1][0].burn_fee = 2; + + // 4,1 + all_ops_no_majority[1][1].parent_block_ptr = 3; + all_ops_no_majority[1][1].parent_vtxindex = 1; + all_ops_no_majority[1][1].vtxindex = 1; + all_ops_no_majority[1][1].burn_fee = 2; + + // 5,0 + all_ops_no_majority[2][0].parent_block_ptr = 4; + all_ops_no_majority[2][0].parent_vtxindex = 0; + all_ops_no_majority[2][0].vtxindex = 0; + all_ops_no_majority[2][0].burn_fee = 3; + + // 5,1 + all_ops_no_majority[2][1].parent_block_ptr = 4; + all_ops_no_majority[2][1].parent_vtxindex = 1; + all_ops_no_majority[2][1].vtxindex = 1; + all_ops_no_majority[2][1].burn_fee = 3; + + let heaviest_parent_commit_opt = find_heaviest_block_commit( + &burnchain_db.tx_begin().unwrap(), + &headers, + &all_ops_no_majority, + burnchain.pox_constants.anchor_threshold, + ) + .unwrap(); + assert!(heaviest_parent_commit_opt.is_some()); + let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = + heaviest_parent_commit_opt.unwrap(); + + // best option wins + assert_eq!( + 
commits[1][1].as_ref().unwrap(), + &heaviest_parent_block_commit + ); + assert_eq!( + descendancy, + vec![vec![false, true], vec![false, true], vec![false, true]] + ); + assert_eq!(total_confs, 3); + assert_eq!(total_burns, 1 + 2 + 3); +} + +#[test] +fn test_update_pox_affirmation_maps_3_forks() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=0: before update: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("n").unwrap()); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=0: after update: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + let anchor_block_0 = BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .unwrap() + .0; + eprintln!("anchor block 1 at height {}", anchor_block_0.block_height); + assert!(anchor_block_0.block_height < commits_0[7][0].as_ref().unwrap().block_height); + + // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block + let (next_headers, commits_1) = make_reward_cycle( + &mut 
burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[7][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's two anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pp").unwrap()); + + // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block but not rc + // 1's + assert!(anchor_block_0.block_height < commits_0[6][0].as_ref().unwrap().block_height); + let (next_headers, commits_2) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[6][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there's three anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + + // there are two equivalently heavy affirmation maps, but the affirmation map discovered later + // is the heaviest. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=2: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pa").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pap").unwrap()); + + // descend from a prepare-phase commit in rc 0, so affirms rc 0's anchor block, but not rc + // 1's or rc 2's + assert!(anchor_block_0.block_height < commits_0[8][0].as_ref().unwrap().block_height); + let (next_headers, commits_3) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[8][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + // there are three equivalently heavy affirmation maps, but the affirmation map discovered last + // is the heaviest. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=3: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("paa").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("paap").unwrap()); +} + +#[test] +fn test_update_pox_affirmation_maps_unique_anchor_block() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=0: before update: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("n").unwrap()); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=0: after update: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + let anchor_block_0 = BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .unwrap() + .0; + eprintln!("anchor block 1 at height {}", anchor_block_0.block_height); + assert!(anchor_block_0.block_height < commits_0[7][0].as_ref().unwrap().block_height); + + // try and select the same anchor block, twice + let mut dup_commits = commits_0.clone(); + for (i, cmts) in dup_commits.iter_mut().enumerate() { 
+ let block_header = BurnchainBlockHeader { + block_height: (i + commits_0.len() + 1) as u64, + block_hash: next_burn_header_hash(), + parent_block_hash: headers + .last() + .map(|blk| blk.block_hash.clone()) + .unwrap_or(first_bhh.clone()), + num_txs: cmts.len() as u64, + timestamp: (i + commits_0.len()) as u64, + }; + + for cmt_opt in cmts.iter_mut() { + if let Some(cmt) = cmt_opt.as_mut() { + cmt.block_height = block_header.block_height; + cmt.parent_block_ptr = anchor_block_0.block_height as u32; + cmt.parent_vtxindex = anchor_block_0.vtxindex as u16; + cmt.burn_parent_modulus = + ((cmt.block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8; + cmt.burn_header_hash = block_header.block_hash.clone(); + cmt.block_header_hash = next_block_hash(); + } + } + + headers.push(block_header.clone()); + + let cmt_ops: Vec = cmts + .iter() + .filter_map(|op| op.clone()) + .map(|op| BlockstackOperationType::LeaderBlockCommit(op)) + .collect(); + + burnchain_db + .store_new_burnchain_block_ops_unchecked(&burnchain, &headers, &block_header, &cmt_ops) + .unwrap(); + } + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's still only one anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_none()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pn").unwrap()); +} + +#[test] +fn 
test_update_pox_affirmation_maps_absent() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // make two histories -- one with an anchor block, and one without. + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None, None], + ); + + // no anchor blocks recorded, yet! + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + assert_eq!(heaviest_am, AffirmationMap::empty()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block, and it's at vtxindex 1 (not 0) + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert_eq!( + BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .unwrap() + .0 + .vtxindex, + 1 + ); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_none()); + + // the anchor block itself affirms nothing + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + 
.unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=0: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + for i in 5..10 { + let block_commit = BurnchainDB::get_block_commit( + burnchain_db.conn(), + &commits_0[i][0].as_ref().unwrap().txid, + ) + .unwrap() + .unwrap(); + assert_eq!(block_commit.vtxindex, 0); + + let block_commit_metadata = BurnchainDB::get_commit_metadata( + burnchain_db.conn(), + &block_commit.burn_header_hash, + &block_commit.txid, + ) + .unwrap() + .unwrap(); + assert_eq!(block_commit_metadata.anchor_block_descendant, None); + } + + // build a second reward cycle off of a commit that does _not_ affirm the first anchor + // block + let (next_headers, commits_1) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[9][1].clone(), commits_0[9][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // the second anchor block affirms that the first anchor block is missing. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("ap").unwrap()); + + // build a third reward cycle off of a commit in the second reward cycle, but make it so + // that there is no anchor block mined + let (next_headers, commits_2) = make_reward_cycle_without_anchor( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_1[9][0].clone(), commits_1[9][1].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there isn't a third anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + + // heaviest _anchor block_ affirmation map is unchanged. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=2: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("apn").unwrap()); + + // build a fourth reward cycle off of a commit in the third reward cycle, but make it so + // that there is no anchor block mined + assert!(commits_2[5][0].is_some()); + assert!(commits_2[5][1].is_some()); + let (next_headers, commits_3) = make_reward_cycle_without_anchor( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_2[5][0].clone(), commits_2[5][1].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_none()); + + // heaviest _anchor block_ affirmation map is unchanged. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=3: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("apnn").unwrap()); + + // make a fourth fifth cycle, again with a missing anchor block + assert!(commits_3[5][0].is_some()); + assert!(commits_3[5][1].is_some()); + let (next_headers, commits_4) = make_reward_cycle_without_anchor( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_3[5][0].clone(), commits_3[5][1].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 4, &burnchain).unwrap(); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_none()); + + // heaviest _anchor block_ affirmation map advances + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=4: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("apnnn").unwrap()); + + // make a fifth reward cycle, but with an anchor block. 
Affirms the first anchor block by + // descending from a chain that descends from it. + assert!(commits_4[5][0].is_some()); + assert!(commits_4[5][1].is_some()); + let (next_headers, commits_5) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_4[5][1].clone(), commits_4[5][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 5, &burnchain).unwrap(); + + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 5) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 6) + .unwrap() + .is_some()); + + // heaviest _anchor block_ affirmation map advances, since the new anchor block affirms the + // last 4 reward cycles, including the anchor block mined in the first reward cycle + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=5: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + // anchor block was chosen in the last reward cycle, and in doing so created the heaviest + // affirmation map for an anchor block, so the canonical affirmation map is + // whatever that last anchor block affirmed + assert_eq!(heaviest_am, AffirmationMap::decode("pannn").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pannnp").unwrap()); + + 
// make a third history that affirms _nothing_. It should eventually overtake this last + // heaviest affirmation map + let mut start = vec![commits_0[3][1].clone()]; + for i in 0..6 { + let (next_headers, commits) = make_reward_cycle_with_vote( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + start, + false, + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 6 + i, &burnchain).unwrap(); + start = vec![commits[5][0].clone()]; + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| { + true + }) + .unwrap(); + eprintln!( + "rc={}: heaviest = {}, canonical = {}", + 6 + i, + &heaviest_am, + &canonical_am + ); + } + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=11: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pannn").unwrap()); + assert_eq!( + canonical_am, + AffirmationMap::decode("pannnpnnnnnn").unwrap() + ); + + // other affirmation map should be present + let unaffirmed_am = AffirmationMap::decode("aannnannnnnn").unwrap(); + let am_id = BurnchainDB::get_affirmation_map_id(burnchain_db.conn(), &unaffirmed_am) + .unwrap() + .unwrap(); + let weight = BurnchainDB::get_affirmation_weight(burnchain_db.conn(), am_id) + .unwrap() + .unwrap(); + assert_eq!(weight, 9); +} + +#[test] +fn test_update_pox_affirmation_maps_nothing() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + 
burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + assert_eq!(heaviest_am, AffirmationMap::empty()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=0: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + // build a second reward cycle 
off of the first, but with no anchor block + let (next_headers, commits_1) = make_reward_cycle_with_vote( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[9][0].clone()], + false, + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's still one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_none()); + + // second reward cycle doesn't have an anchor block, so there's no heaviest anchor block + // affirmation map yet + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pn").unwrap()); + + // build a 3rd reward cycle, but it affirms an anchor block + let last_commit_1 = { + let mut last_commit = None; + for i in 0..commits_1.len() { + if commits_1[i][0].is_some() { + last_commit = commits_1[i][0].clone(); + } + } + last_commit + }; + + let (next_headers, commits_2) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![last_commit_1], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there's two anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + 
assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_none()); + + // there's no anchor block in rc 1 + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=2: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pnp").unwrap()); + + // build a fourth reward cycle, with no vote + let (next_headers, commits_3) = make_reward_cycle_with_vote( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_2[9][0].clone()], + false, + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + // there are three equivalently heavy affirmation maps, but the affirmation map discovered last + // is the heaviest. BUT THIS TIME, MAKE THE UNCONFIRMED ORACLE DENY THAT THIS LAST + // ANCHORED BLOCK EXISTS. 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| false) + .unwrap(); + eprintln!( + "rc=3 (deny): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pnan").unwrap()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=3 (exist): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pn").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pnpn").unwrap()); +} + +#[test] +fn test_update_pox_affirmation_fork_2_cycles() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! 
+ let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + assert_eq!(heaviest_am, AffirmationMap::empty()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=0 (true): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| false) + .unwrap(); + eprintln!( + "rc=0 (false): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(canonical_am, AffirmationMap::decode("a").unwrap()); + + // build a second reward cycle off of the first + let (next_headers, commits_1) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[4][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's two anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + 
assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + + // the network affirms two anchor blocks, but the second anchor block only affirms the + // first anchor block. + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=1 (true): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pp").unwrap()); + + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| false) + .unwrap(); + eprintln!( + "rc=1 (false): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(canonical_am, AffirmationMap::decode("pa").unwrap()); + + // build a third reward cycle off of the first, before the 2nd's anchor block + let (next_headers, commits_2) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[1][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there's four anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + 
BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=2 (true): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("p").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("ppp").unwrap()); + + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| false) + .unwrap(); + eprintln!( + "rc=2 (false): heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(canonical_am, AffirmationMap::decode("paa").unwrap()); + + // build a fourth reward cycle off of the third + let (next_headers, commits_3) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_2[4][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + // there's four anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=3: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("aap").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("aapp").unwrap()); +} + +#[test] +fn test_update_pox_affirmation_fork_duel() { + let first_bhh 
= BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let key_register = make_simple_key_register(&first_block_header.block_hash, 0, 1); + + // first reward cycle is all (linear) commits, so it must elect an anchor block + let (next_headers, commits_0) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![None], + ); + + // no anchor blocks recorded, yet! + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + assert_eq!(heaviest_am, AffirmationMap::empty()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_none()); + + update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap(); + + // there's only one anchor block + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + + // the anchor block itself affirms nothing, since it isn't built on an anchor block + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=0: heaviest = {}, 
canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("p").unwrap()); + + // build a second reward cycle off of the first, but at the start + assert!(commits_0[1][0].is_some()); + let (next_headers, commits_1) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[1][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap(); + + // there's two anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + + // the network affirms two anchor blocks, but the second one wins + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=1: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("a").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("ap").unwrap()); + + // build a third reward cycle off of the first + assert!(commits_0[4][0].clone().unwrap().block_height == 5); + let (next_headers, commits_2) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_0[4][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 2, &burnchain).unwrap(); + + // there's four anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + 
assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=2: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("pa").unwrap()); + assert_eq!(canonical_am, AffirmationMap::decode("pap").unwrap()); + + // build a fourth reward cycle off of the second + assert!(commits_1[4][0].clone().unwrap().block_height == 10); + let (next_headers, commits_3) = make_reward_cycle( + &mut burnchain_db, + &burnchain, + &key_register, + &mut headers, + vec![commits_1[4][0].clone()], + ); + update_pox_affirmation_maps(&mut burnchain_db, &headers, 3, &burnchain).unwrap(); + + // there's four anchor blocks + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0) + .unwrap() + .is_none()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 2) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 3) + .unwrap() + .is_some()); + assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 4) + .unwrap() + .is_some()); + + let heaviest_am = + BurnchainDB::get_heaviest_anchor_block_affirmation_map(burnchain_db.conn(), &burnchain) + .unwrap(); + let canonical_am = + BurnchainDB::get_canonical_affirmation_map(burnchain_db.conn(), &burnchain, |_, _| true) + .unwrap(); + eprintln!( + "rc=3: heaviest = {}, canonical = {}", + &heaviest_am, &canonical_am + ); + + assert_eq!(heaviest_am, AffirmationMap::decode("apa").unwrap()); + assert_eq!(canonical_am, 
AffirmationMap::decode("apap").unwrap()); +} diff --git a/src/burnchains/tests/burnchain.rs b/src/burnchains/tests/burnchain.rs new file mode 100644 index 0000000000..632bbd9e79 --- /dev/null +++ b/src/burnchains/tests/burnchain.rs @@ -0,0 +1,1121 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use ed25519_dalek::Keypair as VRFKeypair; +use rand::rngs::ThreadRng; +use rand::thread_rng; +use serde::Serialize; +use sha2::Sha512; + +use crate::types::chainstate::StacksAddress; +use crate::types::proof::TrieHash; +use address::AddressHashMode; +use burnchains::affirmation::*; +use burnchains::bitcoin::address::*; +use burnchains::bitcoin::keys::BitcoinPublicKey; +use burnchains::bitcoin::*; +use burnchains::*; +use burnchains::{BurnchainBlock, BurnchainBlockHeader, Txid}; +use chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleTx}; +use chainstate::burn::distribution::BurnSamplePoint; +use chainstate::burn::operations::{ + leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS, BlockstackOperationType, LeaderBlockCommitOp, + LeaderKeyRegisterOp, UserBurnSupportOp, +}; +use chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash}; +use chainstate::stacks::StacksPublicKey; +use util::db::Error as db_error; +use util::get_epoch_time_secs; +use util::hash::hex_bytes; +use util::hash::to_hex; +use util::hash::Hash160; +use util::log; +use util::secp256k1::Secp256k1PrivateKey; +use util::uint::BitArray; +use util::uint::Uint256; +use util::uint::Uint512; +use util::vrf::VRFPrivateKey; +use util::vrf::VRFPublicKey; + +use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, VRFSeed}; + +#[test] +fn test_process_block_ops() { + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000123", + ) + .unwrap(); + let first_block_height = 120; + + let burnchain = Burnchain { + pox_constants: PoxConstants::test_default(), + peer_version: 0x012345678, + network_id: 0x9abcdef0, + chain_name: "bitcoin".to_string(), + network_name: "testnet".to_string(), + working_dir: "/nope".to_string(), + consensus_hash_lifetime: 24, + stable_confirmations: 7, + first_block_height, + initial_reward_start_block: first_block_height, + first_block_timestamp: 0, + first_block_hash: 
BurnchainHeaderHash::zero(), + }; + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000123", + ) + .unwrap(); + let block_121_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000012", + ) + .unwrap(); + let block_122_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000002", + ) + .unwrap(); + let block_123_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(); + let block_124_hash_initial = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000004", + ) + .unwrap(); + + let leader_key_1 = LeaderKeyRegisterOp { + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), + ) + .unwrap(), + memo: vec![01, 02, 03, 04, 05], + address: StacksAddress::from_bitcoin_address( + &BitcoinAddress::from_scriptpubkey( + BitcoinNetworkType::Testnet, + &hex_bytes("76a914306231b2782b5f80d944bf69f9d46a1453a0a0eb88ac").unwrap(), + ) + .unwrap(), + ), + + txid: Txid::from_bytes( + &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap(), + ) + .unwrap(), + vtxindex: 456, + block_height: 123, + burn_header_hash: block_123_hash.clone(), + }; + + let leader_key_2 = LeaderKeyRegisterOp { + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c").unwrap(), + ) + .unwrap(), + memo: vec![01, 02, 03, 04, 05], + address: StacksAddress::from_bitcoin_address( + &BitcoinAddress::from_scriptpubkey( + 
BitcoinNetworkType::Testnet, + &hex_bytes("76a914306231b2782b5f80d944bf69f9d46a1453a0a0eb88ac").unwrap(), + ) + .unwrap(), + ), + + txid: Txid::from_bytes( + &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7").unwrap(), + ) + .unwrap(), + vtxindex: 457, + block_height: 122, + burn_header_hash: block_122_hash.clone(), + }; + + let leader_key_3 = LeaderKeyRegisterOp { + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("de8af7037e522e65d2fe2d63fb1b764bfea829df78b84444338379df13144a02").unwrap(), + ) + .unwrap(), + memo: vec![01, 02, 03, 04, 05], + address: StacksAddress::from_bitcoin_address( + &BitcoinAddress::from_scriptpubkey( + BitcoinNetworkType::Testnet, + &hex_bytes("76a914f464a593895cd58c74a7352dd4a65c491d0c0bf688ac").unwrap(), + ) + .unwrap(), + ), + + txid: Txid::from_bytes( + &hex_bytes("eb54704f71d4a2d1128d60ffccced547054b52250ada6f3e7356165714f44d4c").unwrap(), + ) + .unwrap(), + vtxindex: 10, + block_height: 121, + burn_header_hash: block_121_hash.clone(), + }; + + let user_burn_1 = UserBurnSupportOp { + address: StacksAddress::new(1, Hash160([1u8; 20])), + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), + ) + .unwrap(), + block_header_hash_160: Hash160::from_bytes( + &hex_bytes("7150f635054b87df566a970b21e07030d6444bf2").unwrap(), + ) + .unwrap(), // 22222....2222 + key_block_ptr: 123, + key_vtxindex: 456, + burn_fee: 10000, + + txid: Txid::from_bytes( + &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716b").unwrap(), + ) + .unwrap(), + vtxindex: 13, + block_height: 124, + burn_header_hash: block_124_hash_initial.clone(), + }; + + let user_burn_1_2 = UserBurnSupportOp 
{ + address: StacksAddress::new(2, Hash160([2u8; 20])), + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), + ) + .unwrap(), + block_header_hash_160: Hash160::from_bytes( + &hex_bytes("7150f635054b87df566a970b21e07030d6444bf2").unwrap(), + ) + .unwrap(), // 22222....2222 + key_block_ptr: 123, + key_vtxindex: 456, + burn_fee: 30000, + + txid: Txid::from_bytes( + &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716c").unwrap(), + ) + .unwrap(), + vtxindex: 14, + block_height: 124, + burn_header_hash: block_124_hash_initial.clone(), + }; + + let user_burn_2 = UserBurnSupportOp { + address: StacksAddress::new(3, Hash160([3u8; 20])), + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c").unwrap(), + ) + .unwrap(), + block_header_hash_160: Hash160::from_bytes( + &hex_bytes("037a1e860899a4fa823c18b66f6264d20236ec58").unwrap(), + ) + .unwrap(), // 22222....2223 + key_block_ptr: 122, + key_vtxindex: 457, + burn_fee: 20000, + + txid: Txid::from_bytes( + &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716d").unwrap(), + ) + .unwrap(), + vtxindex: 15, + block_height: 124, + burn_header_hash: block_124_hash_initial.clone(), + }; + + let user_burn_2_2 = UserBurnSupportOp { + address: StacksAddress::new(4, Hash160([4u8; 20])), + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c").unwrap(), + ) + .unwrap(), + block_header_hash_160: 
Hash160::from_bytes( + &hex_bytes("037a1e860899a4fa823c18b66f6264d20236ec58").unwrap(), + ) + .unwrap(), // 22222....2223 + key_block_ptr: 122, + key_vtxindex: 457, + burn_fee: 40000, + + txid: Txid::from_bytes( + &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716e").unwrap(), + ) + .unwrap(), + vtxindex: 16, + block_height: 124, + burn_header_hash: block_124_hash_initial.clone(), + }; + + // should be rejected + let user_burn_noblock = UserBurnSupportOp { + address: StacksAddress::new(5, Hash160([5u8; 20])), + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), + ) + .unwrap(), + block_header_hash_160: Hash160::from_bytes( + &hex_bytes("3333333333333333333333333333333333333333").unwrap(), + ) + .unwrap(), + key_block_ptr: 122, + key_vtxindex: 772, + burn_fee: 12345, + + txid: Txid::from_bytes( + &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c716f").unwrap(), + ) + .unwrap(), + vtxindex: 12, + block_height: 123, + burn_header_hash: block_123_hash.clone(), + }; + + // should be rejected + let user_burn_nokey = UserBurnSupportOp { + address: StacksAddress::new(6, Hash160([6u8; 20])), + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("0000000000000000000000000000000000000000").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("3f3338db51f2b1f6ac0cf6177179a24ee130c04ef2f9849a64a216969ab60e70").unwrap(), + ) + .unwrap(), + block_header_hash_160: Hash160::from_bytes( + &hex_bytes("037a1e860899a4fa823c18b66f6264d20236ec58").unwrap(), + ) + .unwrap(), + key_block_ptr: 122, + key_vtxindex: 457, + burn_fee: 12345, + + txid: Txid::from_bytes( + &hex_bytes("1d5cbdd276495b07f0e0bf0181fa57c175b217bc35531b078d62fc20986c7170").unwrap(), + ) + .unwrap(), + vtxindex: 15, + block_height: 123, + 
burn_header_hash: block_123_hash.clone(), + }; + + let block_commit_1 = LeaderBlockCommitOp { + sunset_burn: 0, + commit_outs: vec![], + block_header_hash: BlockHeaderHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + new_seed: VRFSeed::from_bytes( + &hex_bytes("3333333333333333333333333333333333333333333333333333333333333333").unwrap(), + ) + .unwrap(), + parent_block_ptr: 0, + parent_vtxindex: 0, + key_block_ptr: 123, + key_vtxindex: 456, + memo: vec![0x80], + + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner { + public_keys: vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + num_sigs: 1, + hash_mode: AddressHashMode::SerializeP2PKH, + }, + + txid: Txid::from_bytes( + &hex_bytes("3c07a0a93360bc85047bbaadd49e30c8af770f73a37e10fec400174d2e5f27cf").unwrap(), + ) + .unwrap(), + vtxindex: 444, + block_height: 124, + burn_parent_modulus: (123 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: block_124_hash_initial.clone(), + }; + + let block_commit_2 = LeaderBlockCommitOp { + sunset_burn: 0, + commit_outs: vec![], + block_header_hash: BlockHeaderHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222222222222222222222222223").unwrap(), + ) + .unwrap(), + new_seed: VRFSeed::from_bytes( + &hex_bytes("3333333333333333333333333333333333333333333333333333333333333334").unwrap(), + ) + .unwrap(), + parent_block_ptr: 0, + parent_vtxindex: 0, + key_block_ptr: 122, + key_vtxindex: 457, + memo: vec![0x80], + + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner { + public_keys: vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + num_sigs: 1, + hash_mode: AddressHashMode::SerializeP2PKH, + }, + + txid: Txid::from_bytes( + 
&hex_bytes("3c07a0a93360bc85047bbaadd49e30c8af770f73a37e10fec400174d2e5f27d0").unwrap(), + ) + .unwrap(), + vtxindex: 445, + block_height: 124, + burn_parent_modulus: (123 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: block_124_hash_initial.clone(), + }; + + let block_commit_3 = LeaderBlockCommitOp { + sunset_burn: 0, + commit_outs: vec![], + block_header_hash: BlockHeaderHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222222222222222222222222224").unwrap(), + ) + .unwrap(), + new_seed: VRFSeed::from_bytes( + &hex_bytes("3333333333333333333333333333333333333333333333333333333333333335").unwrap(), + ) + .unwrap(), + parent_block_ptr: 0, + parent_vtxindex: 0, + key_block_ptr: 121, + key_vtxindex: 10, + memo: vec![0x80], + + burn_fee: 23456, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner { + public_keys: vec![StacksPublicKey::from_hex( + "0283d603abdd2392646dbdd0dc80beb39c25bfab96a8a921ea5e7517ce533f8cd5", + ) + .unwrap()], + num_sigs: 1, + hash_mode: AddressHashMode::SerializeP2PKH, + }, + + txid: Txid::from_bytes( + &hex_bytes("301dc687a9f06a1ae87a013f27133e9cec0843c2983567be73e185827c7c13de").unwrap(), + ) + .unwrap(), + vtxindex: 446, + block_height: 124, + burn_parent_modulus: (123 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: block_124_hash_initial.clone(), + }; + + let block_ops_121: Vec = + vec![BlockstackOperationType::LeaderKeyRegister( + leader_key_3.clone(), + )]; + let block_opshash_121 = OpsHash::from_txids(&vec![leader_key_3.txid.clone()]); + let block_prev_chs_121 = + vec![ConsensusHash::from_hex("0000000000000000000000000000000000000000").unwrap()]; + let mut block_121_snapshot = BlockSnapshot { + accumulated_coinbase_ustx: 0, + pox_valid: true, + block_height: 121, + burn_header_hash: block_121_hash.clone(), + sortition_id: SortitionId(block_121_hash.0.clone()), + parent_sortition_id: SortitionId(block_121_hash.0.clone()), + burn_header_timestamp: 121, + parent_burn_header_hash: 
first_burn_hash.clone(), + ops_hash: block_opshash_121.clone(), + consensus_hash: ConsensusHash::from_ops( + &block_121_hash, + &block_opshash_121, + 0, + &block_prev_chs_121, + &PoxId::stubbed(), + ), + total_burn: 0, + sortition: false, + sortition_hash: SortitionHash::initial().mix_burn_header(&block_121_hash), + winning_block_txid: Txid::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + winning_stacks_block_hash: BlockHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + index_root: TrieHash::from_empty_data(), // TBD + num_sortitions: 0, + stacks_block_accepted: false, + stacks_block_height: 0, + arrival_index: 0, + canonical_stacks_tip_height: 0, + canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), + canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) + }; + + let block_ops_122: Vec = + vec![BlockstackOperationType::LeaderKeyRegister( + leader_key_2.clone(), + )]; + let block_opshash_122 = OpsHash::from_txids(&vec![leader_key_2.txid.clone()]); + let block_prev_chs_122 = vec![ + block_121_snapshot.consensus_hash.clone(), + ConsensusHash::from_hex("0000000000000000000000000000000000000000").unwrap(), + ]; + let mut block_122_snapshot = BlockSnapshot { + accumulated_coinbase_ustx: 0, + pox_valid: true, + block_height: 122, + burn_header_hash: block_122_hash.clone(), + sortition_id: SortitionId(block_122_hash.0.clone()), + parent_sortition_id: block_121_snapshot.sortition_id.clone(), + burn_header_timestamp: 122, + parent_burn_header_hash: block_121_hash.clone(), + ops_hash: block_opshash_122.clone(), + consensus_hash: ConsensusHash::from_ops( + &block_122_hash, + &block_opshash_122, + 0, + &block_prev_chs_122, + &PoxId::stubbed(), + ), + total_burn: 0, + sortition: false, + sortition_hash: SortitionHash::initial() + .mix_burn_header(&block_121_hash) + .mix_burn_header(&block_122_hash), + 
winning_block_txid: Txid::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + winning_stacks_block_hash: BlockHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + index_root: TrieHash::from_empty_data(), // TBD + num_sortitions: 0, + stacks_block_accepted: false, + stacks_block_height: 0, + arrival_index: 0, + canonical_stacks_tip_height: 0, + canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), + canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) + }; + + let block_ops_123: Vec = vec![ + BlockstackOperationType::UserBurnSupport(user_burn_noblock.clone()), + BlockstackOperationType::UserBurnSupport(user_burn_nokey.clone()), + BlockstackOperationType::LeaderKeyRegister(leader_key_1.clone()), + ]; + let block_opshash_123 = OpsHash::from_txids(&vec![ + // notably, the user burns here _wont_ be included in the consensus hash + leader_key_1.txid.clone(), + ]); + let block_prev_chs_123 = vec![ + block_122_snapshot.consensus_hash.clone(), + block_121_snapshot.consensus_hash.clone(), + ]; + let mut block_123_snapshot = BlockSnapshot { + accumulated_coinbase_ustx: 0, + pox_valid: true, + block_height: 123, + burn_header_hash: block_123_hash.clone(), + sortition_id: SortitionId(block_123_hash.0.clone()), + parent_sortition_id: block_122_snapshot.sortition_id.clone(), + burn_header_timestamp: 123, + parent_burn_header_hash: block_122_hash.clone(), + ops_hash: block_opshash_123.clone(), + consensus_hash: ConsensusHash::from_ops( + &block_123_hash, + &block_opshash_123, + 0, + &block_prev_chs_123, + &PoxId::stubbed(), + ), // user burns not included, so zero burns this block + total_burn: 0, + sortition: false, + sortition_hash: SortitionHash::initial() + .mix_burn_header(&block_121_hash) + .mix_burn_header(&block_122_hash) + .mix_burn_header(&block_123_hash), + winning_block_txid: Txid::from_hex( + 
"0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + winning_stacks_block_hash: BlockHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + index_root: TrieHash::from_empty_data(), // TBD + num_sortitions: 0, + stacks_block_accepted: false, + stacks_block_height: 0, + arrival_index: 0, + canonical_stacks_tip_height: 0, + canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), + canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) + }; + + // multiple possibilities for block 124 -- we'll reorg the chain each time back to 123 and + // re-try block 124 to test them all. + let block_ops_124_possibilities = vec![ + vec![BlockstackOperationType::LeaderBlockCommit( + block_commit_1.clone(), + )], + vec![ + BlockstackOperationType::LeaderBlockCommit(block_commit_1.clone()), + BlockstackOperationType::LeaderBlockCommit(block_commit_2.clone()), + BlockstackOperationType::LeaderBlockCommit(block_commit_3.clone()), + ], + vec![ + BlockstackOperationType::LeaderBlockCommit(block_commit_1.clone()), + BlockstackOperationType::LeaderBlockCommit(block_commit_2.clone()), + BlockstackOperationType::LeaderBlockCommit(block_commit_3.clone()), + ], + ]; + + let block_124_winners = vec![ + block_commit_1.clone(), + block_commit_3.clone(), + block_commit_1.clone(), + ]; + + let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); + + // NOTE: the .txs() method will NOT be called, so we can pass an empty vec![] here + let block121 = BurnchainBlock::Bitcoin(BitcoinBlock::new( + 121, + &block_121_hash, + &first_burn_hash, + &vec![], + 121, + )); + let block122 = BurnchainBlock::Bitcoin(BitcoinBlock::new( + 122, + &block_122_hash, + &block_121_hash, + &vec![], + 122, + )); + let block123 = BurnchainBlock::Bitcoin(BitcoinBlock::new( + 123, + &block_123_hash, + &block_122_hash, + &vec![], + 123, + )); + + let 
initial_snapshot = BlockSnapshot::initial( + first_block_height, + &first_burn_hash, + first_block_height as u64, + ); + + // process up to 124 + { + let header = block121.header(); + let mut tx = SortitionHandleTx::begin(&mut db, &initial_snapshot.sortition_id).unwrap(); + + let (sn121, _) = tx + .process_block_ops( + &burnchain, + &initial_snapshot, + &header, + block_ops_121, + None, + PoxId::stubbed(), + None, + 0, + ) + .unwrap(); + tx.commit().unwrap(); + + block_121_snapshot.index_root = sn121.index_root.clone(); + block_121_snapshot.parent_sortition_id = sn121.parent_sortition_id.clone(); + assert_eq!(sn121, block_121_snapshot); + } + { + let header = block122.header(); + let mut tx = SortitionHandleTx::begin(&mut db, &block_121_snapshot.sortition_id).unwrap(); + + let (sn122, _) = tx + .process_block_ops( + &burnchain, + &block_121_snapshot, + &header, + block_ops_122, + None, + PoxId::stubbed(), + None, + 0, + ) + .unwrap(); + tx.commit().unwrap(); + + block_122_snapshot.index_root = sn122.index_root.clone(); + block_122_snapshot.parent_sortition_id = sn122.parent_sortition_id.clone(); + assert_eq!(sn122, block_122_snapshot); + } + { + let header = block123.header(); + let mut tx = SortitionHandleTx::begin(&mut db, &block_122_snapshot.sortition_id).unwrap(); + let (sn123, _) = tx + .process_block_ops( + &burnchain, + &block_122_snapshot, + &header, + block_ops_123, + None, + PoxId::stubbed(), + None, + 0, + ) + .unwrap(); + tx.commit().unwrap(); + + block_123_snapshot.index_root = sn123.index_root.clone(); + block_123_snapshot.parent_sortition_id = sn123.parent_sortition_id.clone(); + assert_eq!(sn123, block_123_snapshot); + } + + for scenario_idx in 0..block_ops_124_possibilities.len() { + let mut block_ops_124 = block_ops_124_possibilities[scenario_idx].clone(); + let mut block_124_hash_bytes = block_124_hash_initial.as_bytes().clone(); + block_124_hash_bytes[0] = (scenario_idx + 1) as u8; + let block_124_hash = 
BurnchainHeaderHash(block_124_hash_bytes); + + for op in block_ops_124.iter_mut() { + op.set_burn_header_hash(block_124_hash.clone()); + } + + // everything will be included + let block_opshash_124 = OpsHash::from_txids( + &block_ops_124 + .clone() + .into_iter() + .map(|bo| bo.txid()) + .collect(), + ); + let block_prev_chs_124 = vec![ + block_123_snapshot.consensus_hash.clone(), + block_122_snapshot.consensus_hash.clone(), + ConsensusHash::from_hex("0000000000000000000000000000000000000000").unwrap(), + ]; + + let burn_total = block_ops_124.iter().fold(0u64, |mut acc, op| { + let bf = match op { + BlockstackOperationType::LeaderBlockCommit(ref op) => op.burn_fee, + BlockstackOperationType::UserBurnSupport(ref op) => 0, + _ => 0, + }; + acc += bf; + acc + }); + + let next_sortition = block_ops_124.len() > 0 && burn_total > 0; + + let mut block_124_snapshot = BlockSnapshot { + accumulated_coinbase_ustx: 400_000_000, + pox_valid: true, + block_height: 124, + burn_header_hash: block_124_hash.clone(), + sortition_id: SortitionId(block_124_hash.0.clone()), + parent_sortition_id: block_123_snapshot.sortition_id.clone(), + burn_header_timestamp: 124, + parent_burn_header_hash: block_123_snapshot.burn_header_hash.clone(), + ops_hash: block_opshash_124.clone(), + consensus_hash: ConsensusHash::from_ops( + &block_124_hash, + &block_opshash_124, + burn_total, + &block_prev_chs_124, + &PoxId::stubbed(), + ), + total_burn: burn_total, + sortition: next_sortition, + sortition_hash: SortitionHash::initial() + .mix_burn_header(&block_121_hash) + .mix_burn_header(&block_122_hash) + .mix_burn_header(&block_123_hash) + .mix_burn_header(&block_124_hash), + winning_block_txid: block_124_winners[scenario_idx].txid.clone(), + winning_stacks_block_hash: block_124_winners[scenario_idx].block_header_hash.clone(), + index_root: TrieHash::from_empty_data(), // TDB + num_sortitions: if next_sortition { 1 } else { 0 }, + stacks_block_accepted: false, + stacks_block_height: 0, + arrival_index: 
0, + canonical_stacks_tip_height: 0, + canonical_stacks_tip_hash: BlockHeaderHash([0u8; 32]), + canonical_stacks_tip_consensus_hash: ConsensusHash([0u8; 20]), + ..BlockSnapshot::initial(0, &first_burn_hash, 0) + }; + + if next_sortition { + block_124_snapshot.sortition_hash = block_124_snapshot + .sortition_hash + .mix_VRF_seed(&block_124_winners[scenario_idx].new_seed); + } + + let block124 = BurnchainBlock::Bitcoin(BitcoinBlock::new( + 124, + &block_124_hash, + &block_123_hash, + &vec![], + 124, + )); + + // process this scenario + let sn124 = { + let header = block124.header(); + let mut tx = + SortitionHandleTx::begin(&mut db, &block_123_snapshot.sortition_id).unwrap(); + let (sn124, _) = tx + .process_block_ops( + &burnchain, + &block_123_snapshot, + &header, + block_ops_124, + None, + PoxId::stubbed(), + None, + 0, + ) + .unwrap(); + tx.commit().unwrap(); + + block_124_snapshot.index_root = sn124.index_root.clone(); + block_124_snapshot.parent_sortition_id = sn124.parent_sortition_id.clone(); + sn124 + }; + + assert_eq!(sn124, block_124_snapshot); + + // get all winning block commit hashes. + // There should only be two -- the winning block at height 124, and the genesis + // sentinel block hash. This is because epochs 121, 122, and 123 don't have any block + // commits. + let expected_winning_hashes = vec![ + BlockHeaderHash([0u8; 32]), + block_124_winners[scenario_idx].block_header_hash.clone(), + ]; + + // TODO: pair up with stacks chain state? 
+ /* + let winning_header_hashes = { + let mut tx = db.tx_begin().unwrap(); + BurnDB::get_stacks_block_header_inventory(&mut tx, 124).unwrap() + .iter() + .map(|ref hinv| hinv.0.clone()) + .collect() + }; + + assert_eq!(expected_winning_hashes, winning_header_hashes); + */ + } +} + +#[test] +fn test_burn_snapshot_sequence() { + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000123", + ) + .unwrap(); + let first_block_height = 120; + + let burnchain = Burnchain { + pox_constants: PoxConstants::test_default(), + peer_version: 0x012345678, + network_id: 0x9abcdef0, + chain_name: "bitcoin".to_string(), + network_name: "testnet".to_string(), + working_dir: "/nope".to_string(), + consensus_hash_lifetime: 24, + stable_confirmations: 7, + first_block_timestamp: 0, + first_block_hash: first_burn_hash, + first_block_height, + initial_reward_start_block: first_block_height, + }; + + let mut leader_private_keys = vec![]; + let mut leader_public_keys = vec![]; + let mut leader_bitcoin_public_keys = vec![]; + let mut leader_bitcoin_addresses = vec![]; + + for i in 0..32 { + let mut csprng: ThreadRng = thread_rng(); + let keypair: VRFKeypair = VRFKeypair::generate(&mut csprng); + + let privkey_hex = to_hex(&keypair.secret.to_bytes()); + leader_private_keys.push(privkey_hex); + + let pubkey_hex = to_hex(&keypair.public.to_bytes()); + leader_public_keys.push(pubkey_hex); + + let bitcoin_privkey = Secp256k1PrivateKey::new(); + let bitcoin_publickey = BitcoinPublicKey::from_private(&bitcoin_privkey); + + leader_bitcoin_public_keys.push(to_hex(&bitcoin_publickey.to_bytes())); + + let btc_input = BitcoinTxInput { + in_type: BitcoinInputType::Standard, + keys: vec![bitcoin_publickey.clone()], + num_required: 1, + tx_ref: (Txid([0; 32]), 0), + }; + + leader_bitcoin_addresses.push( + BitcoinAddress::from_bytes( + BitcoinNetworkType::Testnet, + BitcoinAddressType::PublicKeyHash, + &btc_input.to_address_bits(), + ) + 
.unwrap(), + ); + } + + let mut expected_burn_total: u64 = 0; + + // insert all operations + let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); + let mut prev_snapshot = BlockSnapshot::initial( + first_block_height, + &first_burn_hash, + first_block_height as u64, + ); + let mut all_stacks_block_hashes = vec![]; + + for i in 0..32 { + let mut block_ops = vec![]; + let burn_block_hash = BurnchainHeaderHash::from_bytes(&vec![ + i + 1, + i + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + i + 1, + ]) + .unwrap(); + let parent_burn_block_hash = prev_snapshot.burn_header_hash.clone(); + let parent_index_root = prev_snapshot.index_root.clone(); + + // insert block commit paired to previous round's leader key, as well as a user burn + if i > 0 { + let next_block_commit = LeaderBlockCommitOp { + sunset_burn: 0, + commit_outs: vec![], + block_header_hash: BlockHeaderHash::from_bytes(&vec![ + i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ]) + .unwrap(), + new_seed: VRFSeed::from_bytes(&vec![ + i, i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ]) + .unwrap(), + parent_block_ptr: (if i == 1 { + 0 + } else { + first_block_height + (i as u64) + }) as u32, + parent_vtxindex: (if i == 1 { 0 } else { 2 * (i - 1) }) as u16, + key_block_ptr: (first_block_height + (i as u64)) as u32, + key_vtxindex: (2 * (i - 1) + 1) as u16, + memo: vec![i], + + burn_fee: i as u64, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner { + public_keys: vec![StacksPublicKey::from_hex( + &leader_bitcoin_public_keys[(i - 1) as usize].clone(), + ) + .unwrap()], + num_sigs: 1, + hash_mode: AddressHashMode::SerializeP2PKH, + }, + + txid: Txid::from_bytes(&vec![ + i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, i, + 
]) + .unwrap(), + vtxindex: (2 * i) as u32, + block_height: first_block_height + ((i + 1) as u64), + burn_parent_modulus: ((first_block_height + (i as u64)) + % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: burn_block_hash.clone(), + }; + + all_stacks_block_hashes.push(next_block_commit.block_header_hash.clone()); + block_ops.push(BlockstackOperationType::LeaderBlockCommit( + next_block_commit, + )); + } + + let ch = { + let ic = db.index_handle(&prev_snapshot.sortition_id); + ic.get_consensus_at((i as u64) + first_block_height) + .unwrap() + .unwrap_or(ConsensusHash::empty()) + }; + + let next_leader_key = LeaderKeyRegisterOp { + consensus_hash: ch.clone(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes(&leader_public_keys[i as usize]).unwrap(), + ) + .unwrap(), + memo: vec![0, 0, 0, 0, i], + address: StacksAddress::from_bitcoin_address( + &leader_bitcoin_addresses[i as usize].clone(), + ), + + txid: Txid::from_bytes(&vec![ + i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, + ]) + .unwrap(), + vtxindex: (2 * i + 1) as u32, + block_height: first_block_height + (i + 1) as u64, + burn_header_hash: burn_block_hash.clone(), + }; + + block_ops.push(BlockstackOperationType::LeaderKeyRegister(next_leader_key)); + + let block = BurnchainBlock::Bitcoin(BitcoinBlock::new( + first_block_height + (i + 1) as u64, + &burn_block_hash, + &parent_burn_block_hash, + &vec![], + get_epoch_time_secs(), + )); + + // process this block + let snapshot = { + let header = block.header(); + let mut tx = SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); + let (sn, _) = tx + .process_block_ops( + &burnchain, + &prev_snapshot, + &header, + block_ops, + None, + PoxId::stubbed(), + None, + 0, + ) + .unwrap(); + tx.commit().unwrap(); + sn + }; + + if i > 0 { + expected_burn_total += i as u64; + + assert_eq!(snapshot.total_burn, expected_burn_total); + assert_eq!( + snapshot.winning_block_txid, + 
Txid::from_bytes(&vec![ + i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, i + ]) + .unwrap() + ); + assert_eq!( + snapshot.winning_stacks_block_hash, + BlockHeaderHash::from_bytes(&vec![ + i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + ]) + .unwrap() + ); + assert_eq!(snapshot.burn_header_hash, burn_block_hash); + assert_eq!(snapshot.parent_burn_header_hash, parent_burn_block_hash); + assert_eq!(snapshot.block_height, (i as u64) + 1 + first_block_height); + assert!(snapshot.sortition); + } else { + assert!(!snapshot.sortition); + assert_eq!(snapshot.total_burn, 0); + } + + prev_snapshot = snapshot; + } +} diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs new file mode 100644 index 0000000000..9ef9cec06a --- /dev/null +++ b/src/burnchains/tests/db.rs @@ -0,0 +1,591 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cmp; +use std::convert::TryInto; + +use address::*; +use burnchains::bitcoin::address::*; +use burnchains::bitcoin::blocks::*; +use burnchains::bitcoin::*; +use burnchains::db::*; +use burnchains::PoxConstants; +use burnchains::BLOCKSTACK_MAGIC_MAINNET; +use burnchains::*; +use burnchains::{BurnchainBlock, BurnchainBlockHeader, Txid}; +use chainstate::burn::operations::{ + leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS, BlockstackOperationType, LeaderBlockCommitOp, +}; +use chainstate::burn::*; +use chainstate::coordinator::tests::*; +use chainstate::stacks::*; +use deps::bitcoin::blockdata::transaction::Transaction as BtcTx; +use deps::bitcoin::network::serialize::deserialize; +use util::db::Error as DBError; +use util::db::*; +use util::hash::*; + +use crate::types::proof::ClarityMarfTrieId; + +use crate::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, VRFSeed, +}; + +fn make_tx(hex_str: &str) -> BtcTx { + let tx_bin = hex_bytes(hex_str).unwrap(); + deserialize(&tx_bin.to_vec()).unwrap() +} + +impl BurnchainHeaderReader for Vec { + fn read_burnchain_headers( + &self, + start_height: u64, + end_height: u64, + ) -> Result, DBError> { + if start_height >= self.len() as u64 { + return Ok(vec![]); + } + let end = cmp::min(end_height, self.len() as u64) as usize; + Ok(self[(start_height as usize)..end].to_vec()) + } + + fn get_burnchain_headers_height(&self) -> Result { + Ok(self.len() as u64) + } +} + +#[test] +fn test_store_and_fetch() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 321; + let first_height = 1; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::test_default(); + burnchain.pox_constants.sunset_start = 999; + burnchain.pox_constants.sunset_end = 1000; + + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut 
burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + assert_eq!(&first_block_header.block_hash, &first_bhh); + assert_eq!(&first_block_header.block_height, &first_height); + assert_eq!(first_block_header.timestamp, first_timestamp as u64); + assert_eq!( + &first_block_header.parent_block_hash, + &BurnchainHeaderHash::sentinel() + ); + + let headers = vec![first_block_header.clone()]; + let canon_hash = BurnchainHeaderHash([1; 32]); + + let canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( + 500, + &canon_hash, + &first_bhh, + &vec![], + 485, + )); + let ops = burnchain_db + .store_new_burnchain_block(&burnchain, &headers, &canonical_block) + .unwrap(); + assert_eq!(ops.len(), 0); + + let vtxindex = 1; + let noncanon_block_height = 400; + let non_canon_hash = BurnchainHeaderHash([2; 32]); + + let fixtures = operations::leader_key_register::tests::get_test_fixtures( + vtxindex, + noncanon_block_height, + non_canon_hash, + ); + + let parser = BitcoinBlockParser::new(BitcoinNetworkType::Testnet, BLOCKSTACK_MAGIC_MAINNET); + let mut broadcast_ops = vec![]; + let mut expected_ops = vec![]; + + for (ix, tx_fixture) in fixtures.iter().enumerate() { + let tx = make_tx(&tx_fixture.txstr); + let burnchain_tx = parser.parse_tx(&tx, ix + 1).unwrap(); + if let Some(res) = &tx_fixture.result { + let mut res = res.clone(); + res.vtxindex = (ix + 1).try_into().unwrap(); + expected_ops.push(res.clone()); + } + broadcast_ops.push(burnchain_tx); + } + + let non_canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( + 400, + &non_canon_hash, + &first_bhh, + &broadcast_ops, + 350, + )); + + let ops = burnchain_db + .store_new_burnchain_block(&burnchain, &headers, &non_canonical_block) + .unwrap(); + assert_eq!(ops.len(), expected_ops.len()); + for op in ops.iter() { + let expected_op = expected_ops + .iter() + .find(|candidate| candidate.txid == op.txid()) + 
.expect("FAILED to find parsed op in expected ops"); + if let BlockstackOperationType::LeaderKeyRegister(op) = op { + assert_eq!(op, expected_op); + } else { + panic!("EXPECTED to parse a LeaderKeyRegister"); + } + } + + let BurnchainBlockData { header, ops } = + BurnchainDB::get_burnchain_block(&burnchain_db.conn(), &non_canon_hash).unwrap(); + assert_eq!(ops.len(), expected_ops.len()); + for op in ops.iter() { + let expected_op = expected_ops + .iter() + .find(|candidate| candidate.txid == op.txid()) + .expect("FAILED to find parsed op in expected ops"); + if let BlockstackOperationType::LeaderKeyRegister(op) = op { + assert_eq!(op, expected_op); + } else { + panic!("EXPECTED to parse a LeaderKeyRegister"); + } + } + assert_eq!(&header, &non_canonical_block.header()); + + let looked_up_canon = burnchain_db.get_canonical_chain_tip().unwrap(); + assert_eq!(&looked_up_canon, &canonical_block.header()); + + let BurnchainBlockData { header, ops } = + BurnchainDB::get_burnchain_block(&burnchain_db.conn(), &canon_hash).unwrap(); + assert_eq!(ops.len(), 0); + assert_eq!(&header, &looked_up_canon); +} + +#[test] +fn test_classify_stack_stx() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 321; + let first_height = 1; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::test_default(); + burnchain.pox_constants.sunset_start = 999; + burnchain.pox_constants.sunset_end = 1000; + + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + assert_eq!(&first_block_header.block_hash, &first_bhh); + assert_eq!(&first_block_header.block_height, &first_height); + assert_eq!(first_block_header.timestamp, first_timestamp as u64); + assert_eq!( + 
&first_block_header.parent_block_hash, + &BurnchainHeaderHash::sentinel() + ); + + let canon_hash = BurnchainHeaderHash([1; 32]); + let mut headers = vec![first_block_header.clone()]; + + let canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( + 500, + &canon_hash, + &first_bhh, + &vec![], + 485, + )); + let ops = burnchain_db + .store_new_burnchain_block(&burnchain, &headers, &canonical_block) + .unwrap(); + assert_eq!(ops.len(), 0); + + // let's mine a block with a pre-stack-stx tx, and a stack-stx tx, + // the stack-stx tx should _fail_ to verify, because there's no + // corresponding pre-stack-stx. + + let parser = BitcoinBlockParser::new(BitcoinNetworkType::Testnet, BLOCKSTACK_MAGIC_MAINNET); + + let pre_stack_stx_0_txid = Txid([5; 32]); + let pre_stack_stx_0 = BitcoinTransaction { + txid: pre_stack_stx_0_txid.clone(), + vtxindex: 0, + opcode: Opcodes::PreStx as u8, + data: vec![0; 80], + data_amt: 0, + inputs: vec![BitcoinTxInput { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (Txid([0; 32]), 1), + }], + outputs: vec![BitcoinTxOutput { + units: 10, + address: BitcoinAddress { + addrtype: BitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + }, + }], + }; + + // this one will not have a corresponding pre_stack_stx tx. + let stack_stx_0 = BitcoinTransaction { + txid: Txid([4; 32]), + vtxindex: 1, + opcode: Opcodes::StackStx as u8, + data: vec![1; 80], + data_amt: 0, + inputs: vec![BitcoinTxInput { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (Txid([0; 32]), 1), + }], + outputs: vec![BitcoinTxOutput { + units: 10, + address: BitcoinAddress { + addrtype: BitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + }, + }], + }; + + // this one will have a corresponding pre_stack_stx tx. 
+ let stack_stx_0_second_attempt = BitcoinTransaction { + txid: Txid([4; 32]), + vtxindex: 2, + opcode: Opcodes::StackStx as u8, + data: vec![1; 80], + data_amt: 0, + inputs: vec![BitcoinTxInput { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (pre_stack_stx_0_txid.clone(), 1), + }], + outputs: vec![BitcoinTxOutput { + units: 10, + address: BitcoinAddress { + addrtype: BitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([2; 20]), + }, + }], + }; + + // this one won't have a corresponding pre_stack_stx tx. + let stack_stx_1 = BitcoinTransaction { + txid: Txid([3; 32]), + vtxindex: 3, + opcode: Opcodes::StackStx as u8, + data: vec![1; 80], + data_amt: 0, + inputs: vec![BitcoinTxInput { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (Txid([0; 32]), 1), + }], + outputs: vec![BitcoinTxOutput { + units: 10, + address: BitcoinAddress { + addrtype: BitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + }, + }], + }; + + // this one won't use the correct output + let stack_stx_2 = BitcoinTransaction { + txid: Txid([8; 32]), + vtxindex: 4, + opcode: Opcodes::StackStx as u8, + data: vec![1; 80], + data_amt: 0, + inputs: vec![BitcoinTxInput { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (pre_stack_stx_0_txid.clone(), 2), + }], + outputs: vec![BitcoinTxOutput { + units: 10, + address: BitcoinAddress { + addrtype: BitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + }, + }], + }; + + let ops_0 = vec![pre_stack_stx_0, stack_stx_0]; + + let ops_1 = vec![stack_stx_1, stack_stx_0_second_attempt, stack_stx_2]; + + let block_height_0 = 501; + let block_hash_0 = BurnchainHeaderHash([2; 32]); + let block_height_1 = 502; + let block_hash_1 = BurnchainHeaderHash([3; 32]); + + let block_0 = BurnchainBlock::Bitcoin(BitcoinBlock::new( 
+ block_height_0, + &block_hash_0, + &first_bhh, + &ops_0, + 350, + )); + + headers.push(BurnchainBlockHeader { + block_height: first_block_header.block_height + 1, + block_hash: block_hash_0.clone(), + parent_block_hash: first_bhh.clone(), + num_txs: ops_0.len() as u64, + timestamp: first_block_header.timestamp + 1, + }); + + let block_1 = BurnchainBlock::Bitcoin(BitcoinBlock::new( + block_height_1, + &block_hash_1, + &block_hash_0, + &ops_1, + 360, + )); + + headers.push(BurnchainBlockHeader { + block_height: first_block_header.block_height + 2, + block_hash: block_hash_1.clone(), + parent_block_hash: block_hash_0.clone(), + num_txs: ops_1.len() as u64, + timestamp: first_block_header.timestamp + 2, + }); + + let processed_ops_0 = burnchain_db + .store_new_burnchain_block(&burnchain, &headers, &block_0) + .unwrap(); + + assert_eq!( + processed_ops_0.len(), + 1, + "Only pre_stack_stx op should have been accepted" + ); + + let processed_ops_1 = burnchain_db + .store_new_burnchain_block(&burnchain, &headers, &block_1) + .unwrap(); + + assert_eq!( + processed_ops_1.len(), + 1, + "Only one stack_stx op should have been accepted" + ); + + let expected_pre_stack_addr = StacksAddress::from_bitcoin_address(&BitcoinAddress { + addrtype: BitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + }); + + let expected_reward_addr = StacksAddress::from_bitcoin_address(&BitcoinAddress { + addrtype: BitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([2; 20]), + }); + + if let BlockstackOperationType::PreStx(op) = &processed_ops_0[0] { + assert_eq!(&op.output, &expected_pre_stack_addr); + } else { + panic!("EXPECTED to parse a pre stack stx op"); + } + + if let BlockstackOperationType::StackStx(op) = &processed_ops_1[0] { + assert_eq!(&op.sender, &expected_pre_stack_addr); + assert_eq!(&op.reward_addr, &expected_reward_addr); + assert_eq!(op.stacked_ustx, u128::from_be_bytes([1; 16])); + 
assert_eq!(op.num_cycles, 1); + } else { + panic!("EXPECTED to parse a stack stx op"); + } +} + +pub fn make_simple_block_commit( + burnchain: &Burnchain, + parent: Option<&LeaderBlockCommitOp>, + burn_header: &BurnchainBlockHeader, + block_hash: BlockHeaderHash, +) -> LeaderBlockCommitOp { + let block_height = burn_header.block_height; + let mut new_op = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: block_hash, + new_seed: VRFSeed([1u8; 32]), + parent_block_ptr: 0, + parent_vtxindex: 0, + key_block_ptr: 0, + key_vtxindex: 0, + memo: vec![0], + + commit_outs: vec![ + StacksAddress { + version: 26, + bytes: Hash160::empty(), + }, + StacksAddress { + version: 26, + bytes: Hash160::empty(), + }, + ], + + burn_fee: 10000, + input: (next_txid(), 0), + apparent_sender: BurnchainSigner { + public_keys: vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + num_sigs: 1, + hash_mode: AddressHashMode::SerializeP2PKH, + }, + + txid: next_txid(), + vtxindex: 0, + block_height: block_height, + burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: burn_header.block_hash.clone(), + }; + + if burnchain.is_in_prepare_phase(block_height) { + new_op.commit_outs = vec![StacksAddress { + version: 26, + bytes: Hash160::empty(), + }]; + } + + if let Some(ref op) = parent { + new_op.parent_block_ptr = op.block_height as u32; + new_op.parent_vtxindex = op.vtxindex as u16; + }; + + new_op +} + +#[test] +fn test_get_commit_at() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 1; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", 
&burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let mut parent = None; + let mut parent_block_header: Option = None; + let mut cmts = vec![]; + + for i in 0..5 { + let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); + let block_header = BurnchainBlockHeader { + block_height: (first_height + i) as u64, + block_hash: hdr, + parent_block_hash: parent_block_header + .as_ref() + .map(|blk| blk.block_hash.clone()) + .unwrap_or(first_block_header.block_hash.clone()), + num_txs: 1, + timestamp: i as u64, + }; + + headers.push(block_header.clone()); + parent_block_header = Some(block_header); + } + + for i in 0..5 { + let block_header = &headers[i + 1]; + + let cmt = make_simple_block_commit( + &burnchain, + parent.as_ref(), + block_header, + BlockHeaderHash([((i + 1) as u8) | 0x80; 32]), + ); + burnchain_db + .store_new_burnchain_block_ops_unchecked( + &burnchain, + &headers, + block_header, + &vec![BlockstackOperationType::LeaderBlockCommit(cmt.clone())], + ) + .unwrap(); + + cmts.push(cmt.clone()); + parent = Some(cmt); + } + + for i in 0..5 { + let cmt = BurnchainDB::get_commit_at( + &burnchain_db.conn(), + &headers, + (first_height + i) as u32, + 0, + ) + .unwrap() + .unwrap(); + assert_eq!(cmt, cmts[i as usize]); + } + + let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), &headers, 5, 0) + .unwrap() + .unwrap(); + assert_eq!(cmt, cmts[4]); + + // fork off the last stored commit block + let fork_hdr = BurnchainHeaderHash([90 as u8; 32]); + let fork_block_header = BurnchainBlockHeader { + block_height: 4, + block_hash: fork_hdr, + parent_block_hash: BurnchainHeaderHash([5 as u8; 32]), + num_txs: 0, + timestamp: 4 as u64, + }; + + burnchain_db + .store_new_burnchain_block_ops_unchecked(&burnchain, &headers, &fork_block_header, &vec![]) + .unwrap(); + headers[4] = fork_block_header; + + let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), 
&headers, 4, 0).unwrap(); + assert!(cmt.is_none()); +} diff --git a/src/burnchains/tests/mod.rs b/src/burnchains/tests/mod.rs new file mode 100644 index 0000000000..4bf749a0c7 --- /dev/null +++ b/src/burnchains/tests/mod.rs @@ -0,0 +1,1128 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2021 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +pub mod affirmation; +pub mod burnchain; +pub mod db; + +use std::collections::HashMap; + +use address::*; +use burnchains::bitcoin::indexer::BitcoinIndexer; +use burnchains::db::*; +use burnchains::Burnchain; +use burnchains::*; +use burnchains::*; +use chainstate::burn::db::sortdb::*; +use chainstate::burn::operations::BlockstackOperationType; +use chainstate::burn::operations::*; +use chainstate::burn::*; +use chainstate::coordinator::comm::*; +use chainstate::coordinator::*; +use chainstate::stacks::*; +use util::db::*; +use util::get_epoch_time_secs; +use util::hash::*; +use util::secp256k1::*; +use util::vrf::*; + +use crate::types::chainstate::{BlockHeaderHash, SortitionId, VRFSeed}; + +impl Txid { + pub fn from_test_data( + block_height: u64, + vtxindex: u32, + burn_header_hash: &BurnchainHeaderHash, + noise: u64, + ) -> Txid { + let mut bytes = vec![]; + bytes.extend_from_slice(&block_height.to_be_bytes()); + bytes.extend_from_slice(&vtxindex.to_be_bytes()); + bytes.extend_from_slice(burn_header_hash.as_bytes()); + bytes.extend_from_slice(&noise.to_be_bytes()); + let h = DoubleSha256::from_data(&bytes[..]); + let mut hb = [0u8; 32]; + hb.copy_from_slice(h.as_bytes()); + + Txid(hb) + } +} + +impl BurnchainHeaderHash { + pub fn from_test_data( + block_height: u64, + index_root: &TrieHash, + noise: u64, + ) -> BurnchainHeaderHash { + let mut bytes = vec![]; + bytes.extend_from_slice(&block_height.to_be_bytes()); + bytes.extend_from_slice(index_root.as_bytes()); + bytes.extend_from_slice(&noise.to_be_bytes()); + let h = DoubleSha256::from_data(&bytes[..]); + let mut hb = [0u8; 32]; + hb.copy_from_slice(h.as_bytes()); + + BurnchainHeaderHash(hb) + } +} + +impl BurnchainBlockHeader { + pub fn from_parent_snapshot( + parent_sn: &BlockSnapshot, + block_hash: BurnchainHeaderHash, + num_txs: u64, + ) -> BurnchainBlockHeader { + BurnchainBlockHeader { + block_height: parent_sn.block_height + 1, + block_hash: block_hash, + parent_block_hash: 
parent_sn.burn_header_hash.clone(), + num_txs: num_txs, + timestamp: get_epoch_time_secs(), + } + } +} + +#[derive(Debug, Clone)] +pub struct TestBurnchainBlock { + pub block_height: u64, + pub parent_snapshot: BlockSnapshot, + pub txs: Vec, + pub fork_id: u64, + pub timestamp: u64, +} + +#[derive(Debug, Clone)] +pub struct TestBurnchainFork { + pub start_height: u64, + pub mined: u64, + pub tip_index_root: TrieHash, + pub tip_header_hash: BurnchainHeaderHash, + pub tip_sortition_id: SortitionId, + pub pending_blocks: Vec, + pub blocks: Vec, + pub fork_id: u64, +} + +pub struct TestBurnchainNode { + pub sortdb: SortitionDB, + pub dirty: bool, + pub burnchain: Burnchain, +} + +#[derive(Debug, Clone)] +pub struct TestMiner { + pub burnchain: Burnchain, + pub privks: Vec, + pub num_sigs: u16, + pub hash_mode: AddressHashMode, + pub microblock_privks: Vec, + pub vrf_keys: Vec, + pub vrf_key_map: HashMap, + pub block_commits: Vec, + pub id: usize, + pub nonce: u64, + pub spent_at_nonce: HashMap, // how much uSTX this miner paid in a given tx's nonce + pub test_with_tx_fees: bool, // set to true to make certain helper methods attach a pre-defined tx fee +} + +pub struct TestMinerFactory { + pub key_seed: [u8; 32], + pub next_miner_id: usize, +} + +impl TestMiner { + pub fn new( + burnchain: &Burnchain, + privks: &Vec, + num_sigs: u16, + hash_mode: &AddressHashMode, + ) -> TestMiner { + TestMiner { + burnchain: burnchain.clone(), + privks: privks.clone(), + num_sigs, + hash_mode: hash_mode.clone(), + microblock_privks: vec![], + vrf_keys: vec![], + vrf_key_map: HashMap::new(), + block_commits: vec![], + id: 0, + nonce: 0, + spent_at_nonce: HashMap::new(), + test_with_tx_fees: true, + } + } + + pub fn last_VRF_public_key(&self) -> Option { + match self.vrf_keys.len() { + 0 => None, + x => Some(VRFPublicKey::from_private(&self.vrf_keys[x - 1])), + } + } + + pub fn last_block_commit(&self) -> Option { + match self.block_commits.len() { + 0 => None, + x => 
Some(self.block_commits[x - 1].clone()), + } + } + + pub fn next_VRF_key(&mut self) -> VRFPrivateKey { + let pk = if self.vrf_keys.len() == 0 { + // first key is simply the 32-byte hash of the secret state + let mut buf: Vec = vec![]; + for i in 0..self.privks.len() { + buf.extend_from_slice(&self.privks[i].to_bytes()[..]); + } + buf.extend_from_slice(&[ + (self.num_sigs >> 8) as u8, + (self.num_sigs & 0xff) as u8, + self.hash_mode as u8, + ]); + let h = Sha256Sum::from_data(&buf[..]); + VRFPrivateKey::from_bytes(h.as_bytes()).unwrap() + } else { + // next key is just the hash of the last + let h = Sha256Sum::from_data(self.vrf_keys[self.vrf_keys.len() - 1].as_bytes()); + VRFPrivateKey::from_bytes(h.as_bytes()).unwrap() + }; + + self.vrf_keys.push(pk.clone()); + self.vrf_key_map + .insert(VRFPublicKey::from_private(&pk), pk.clone()); + pk + } + + pub fn next_microblock_privkey(&mut self) -> StacksPrivateKey { + let pk = if self.microblock_privks.len() == 0 { + // first key is simply the 32-byte hash of the secret state + let mut buf: Vec = vec![]; + for i in 0..self.privks.len() { + buf.extend_from_slice(&self.privks[i].to_bytes()[..]); + } + buf.extend_from_slice(&[ + (self.num_sigs >> 8) as u8, + (self.num_sigs & 0xff) as u8, + self.hash_mode as u8, + ]); + let h = Sha256Sum::from_data(&buf[..]); + StacksPrivateKey::from_slice(h.as_bytes()).unwrap() + } else { + // next key is the hash of the last + let h = Sha256Sum::from_data( + &self.microblock_privks[self.microblock_privks.len() - 1].to_bytes(), + ); + StacksPrivateKey::from_slice(h.as_bytes()).unwrap() + }; + + self.microblock_privks.push(pk.clone()); + pk + } + + pub fn make_proof( + &self, + vrf_pubkey: &VRFPublicKey, + last_sortition_hash: &SortitionHash, + ) -> Option { + test_debug!( + "Make proof from {} over {}", + vrf_pubkey.to_hex(), + last_sortition_hash + ); + match self.vrf_key_map.get(vrf_pubkey) { + Some(ref prover_key) => { + let proof = VRF::prove(prover_key, 
&last_sortition_hash.as_bytes().to_vec()); + let valid = + match VRF::verify(vrf_pubkey, &proof, &last_sortition_hash.as_bytes().to_vec()) + { + Ok(v) => v, + Err(e) => false, + }; + assert!(valid); + Some(proof) + } + None => None, + } + } + + pub fn as_transaction_auth(&self) -> Option { + match self.hash_mode { + AddressHashMode::SerializeP2PKH => TransactionAuth::from_p2pkh(&self.privks[0]), + AddressHashMode::SerializeP2SH => { + TransactionAuth::from_p2sh(&self.privks, self.num_sigs) + } + AddressHashMode::SerializeP2WPKH => TransactionAuth::from_p2wpkh(&self.privks[0]), + AddressHashMode::SerializeP2WSH => { + TransactionAuth::from_p2wsh(&self.privks, self.num_sigs) + } + } + } + + pub fn origin_address(&self) -> Option { + match self.as_transaction_auth() { + Some(auth) => Some(auth.origin().address_testnet()), + None => None, + } + } + + pub fn get_nonce(&self) -> u64 { + self.nonce + } + + pub fn set_nonce(&mut self, n: u64) -> () { + self.nonce = n; + } + + pub fn sign_as_origin(&mut self, tx_signer: &mut StacksTransactionSigner) -> () { + let num_keys = if self.privks.len() < self.num_sigs as usize { + self.privks.len() + } else { + self.num_sigs as usize + }; + + for i in 0..num_keys { + tx_signer.sign_origin(&self.privks[i]).unwrap(); + } + + self.nonce += 1 + } + + pub fn sign_as_sponsor(&mut self, tx_signer: &mut StacksTransactionSigner) -> () { + let num_keys = if self.privks.len() < self.num_sigs as usize { + self.privks.len() + } else { + self.num_sigs as usize + }; + + for i in 0..num_keys { + tx_signer.sign_sponsor(&self.privks[i]).unwrap(); + } + + self.nonce += 1 + } +} + +// creates miners deterministically +impl TestMinerFactory { + pub fn new() -> TestMinerFactory { + TestMinerFactory { + key_seed: [0u8; 32], + next_miner_id: 1, + } + } + + pub fn from_u16(seed: u16) -> TestMinerFactory { + let mut bytes = [0u8; 32]; + (&mut bytes[0..2]).copy_from_slice(&seed.to_be_bytes()); + TestMinerFactory { + key_seed: bytes, + next_miner_id: seed as 
usize, + } + } + + pub fn next_private_key(&mut self) -> StacksPrivateKey { + let h = Sha256Sum::from_data(&self.key_seed); + self.key_seed.copy_from_slice(h.as_bytes()); + + StacksPrivateKey::from_slice(h.as_bytes()).unwrap() + } + + pub fn next_miner( + &mut self, + burnchain: &Burnchain, + num_keys: u16, + num_sigs: u16, + hash_mode: AddressHashMode, + ) -> TestMiner { + let mut keys = vec![]; + for i in 0..num_keys { + keys.push(self.next_private_key()); + } + + test_debug!("New miner: {:?} {}:{:?}", &hash_mode, num_sigs, &keys); + let mut m = TestMiner::new(burnchain, &keys, num_sigs, &hash_mode); + m.id = self.next_miner_id; + self.next_miner_id += 1; + m + } +} + +impl TestBurnchainBlock { + pub fn new(parent_snapshot: &BlockSnapshot, fork_id: u64) -> TestBurnchainBlock { + TestBurnchainBlock { + parent_snapshot: parent_snapshot.clone(), + block_height: parent_snapshot.block_height + 1, + txs: vec![], + fork_id: fork_id, + timestamp: get_epoch_time_secs(), + } + } + + pub fn add_leader_key_register(&mut self, miner: &mut TestMiner) -> LeaderKeyRegisterOp { + let next_vrf_key = miner.next_VRF_key(); + let mut txop = LeaderKeyRegisterOp::new_from_secrets( + &miner.privks, + miner.num_sigs, + &miner.hash_mode, + &next_vrf_key, + ) + .unwrap(); + + txop.vtxindex = self.txs.len() as u32; + txop.block_height = self.block_height; + txop.burn_header_hash = BurnchainHeaderHash::from_test_data( + txop.block_height, + &self.parent_snapshot.index_root, + self.fork_id, + ); + txop.txid = + Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); + txop.consensus_hash = self.parent_snapshot.consensus_hash.clone(); + + self.txs + .push(BlockstackOperationType::LeaderKeyRegister(txop.clone())); + + txop + } + + pub fn add_leader_block_commit( + &mut self, + ic: &SortitionDBConn, + miner: &mut TestMiner, + block_hash: &BlockHeaderHash, + burn_fee: u64, + leader_key: &LeaderKeyRegisterOp, + fork_snapshot: Option<&BlockSnapshot>, + 
parent_block_snapshot: Option<&BlockSnapshot>, + ) -> LeaderBlockCommitOp { + let input = (Txid([0; 32]), 0); + let pubks = miner + .privks + .iter() + .map(|ref pk| StacksPublicKey::from_private(pk)) + .collect(); + let apparent_sender = BurnchainSigner { + hash_mode: miner.hash_mode.clone(), + num_sigs: miner.num_sigs as usize, + public_keys: pubks, + }; + + let last_snapshot = match fork_snapshot { + Some(sn) => sn.clone(), + None => SortitionDB::get_canonical_burn_chain_tip(ic).unwrap(), + }; + + let last_snapshot_with_sortition = match parent_block_snapshot { + Some(sn) => sn.clone(), + None => SortitionDB::get_first_block_snapshot(ic).unwrap(), + }; + + // prove on the last-ever sortition's hash to produce the new seed + let proof = miner + .make_proof(&leader_key.public_key, &last_snapshot.sortition_hash) + .expect(&format!( + "FATAL: no private key for {}", + leader_key.public_key.to_hex() + )); + + let new_seed = VRFSeed::from_proof(&proof); + + let get_commit_res = SortitionDB::get_block_commit( + ic.conn(), + &last_snapshot_with_sortition.winning_block_txid, + &last_snapshot_with_sortition.sortition_id, + ) + .expect("FATAL: failed to read block commit"); + let mut txop = match get_commit_res { + Some(parent) => { + let txop = LeaderBlockCommitOp::new( + block_hash, + self.block_height, + &new_seed, + &parent, + leader_key.block_height as u32, + leader_key.vtxindex as u16, + burn_fee, + &input, + &apparent_sender, + ); + txop + } + None => { + // initial + let txop = LeaderBlockCommitOp::initial( + block_hash, + self.block_height, + &new_seed, + leader_key, + burn_fee, + &input, + &apparent_sender, + ); + txop + } + }; + + txop.set_burn_height(self.block_height); + txop.vtxindex = self.txs.len() as u32; + txop.burn_header_hash = BurnchainHeaderHash::from_test_data( + txop.block_height, + &self.parent_snapshot.index_root, + self.fork_id, + ); // NOTE: override this if you intend to insert into the sortdb! 
+ txop.txid = + Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); + + self.txs + .push(BlockstackOperationType::LeaderBlockCommit(txop.clone())); + + miner.block_commits.push(txop.clone()); + txop + } + + // TODO: user burn support + + pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) -> () { + assert_eq!(parent_snapshot.block_height + 1, self.block_height); + + for i in 0..self.txs.len() { + match self.txs[i] { + BlockstackOperationType::LeaderKeyRegister(ref mut data) => { + assert_eq!(data.block_height, self.block_height); + data.consensus_hash = parent_snapshot.consensus_hash.clone(); + } + + BlockstackOperationType::UserBurnSupport(ref mut data) => { + assert_eq!(data.block_height, self.block_height); + data.consensus_hash = parent_snapshot.consensus_hash.clone(); + } + _ => {} + } + } + } + + pub fn mine(&self, db: &mut SortitionDB, burnchain: &Burnchain) -> BlockSnapshot { + let block_hash = BurnchainHeaderHash::from_test_data( + self.block_height, + &self.parent_snapshot.index_root, + self.fork_id, + ); + let mock_bitcoin_block = BitcoinBlock::new( + self.block_height, + &block_hash, + &self.parent_snapshot.burn_header_hash, + &vec![], + get_epoch_time_secs(), + ); + let block = BurnchainBlock::Bitcoin(mock_bitcoin_block); + + test_debug!( + "Process block {} {}", + block.block_height(), + &block.block_hash() + ); + + let header = block.header(); + let sort_id = SortitionId::stubbed(&header.parent_block_hash); + let mut sortition_db_handle = SortitionHandleTx::begin(db, &sort_id).unwrap(); + + let parent_snapshot = sortition_db_handle + .get_block_snapshot(&header.parent_block_hash, &sort_id) + .unwrap() + .expect("FATAL: failed to get burnchain linkage info"); + + let blockstack_txs = self.txs.clone(); + + let burnchain_db = + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), &burnchain, true).unwrap(); + + let new_snapshot = sortition_db_handle + .process_block_txs( + &parent_snapshot, + 
&header, + burnchain, + blockstack_txs, + None, + PoxId::stubbed(), + None, + 0, + ) + .unwrap(); + sortition_db_handle.commit().unwrap(); + + new_snapshot.0 + } + + pub fn mine_pox<'a, T: BlockEventDispatcher, N: CoordinatorNotices, R: RewardSetProvider>( + &self, + db: &mut SortitionDB, + burnchain: &Burnchain, + coord: &mut ChainsCoordinator<'a, T, N, R>, + ) -> BlockSnapshot { + let block_hash = BurnchainHeaderHash::from_test_data( + self.block_height, + &self.parent_snapshot.index_root, + self.fork_id, + ); + let mock_bitcoin_block = BitcoinBlock::new( + self.block_height, + &block_hash, + &self.parent_snapshot.burn_header_hash, + &vec![], + get_epoch_time_secs(), + ); + let block = BurnchainBlock::Bitcoin(mock_bitcoin_block); + + test_debug!( + "Process PoX block {} {}", + block.block_height(), + &block.block_hash() + ); + + let header = block.header(); + let indexer: BitcoinIndexer = burnchain.make_indexer().unwrap(); + + let mut burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); + burnchain_db + .raw_store_burnchain_block(burnchain, &indexer, header.clone(), self.txs.clone()) + .unwrap(); + + coord.handle_new_burnchain_block().unwrap(); + + let snapshot = SortitionDB::get_canonical_burn_chain_tip(db.conn()).unwrap(); + snapshot + } +} + +impl TestBurnchainFork { + pub fn new( + start_height: u64, + start_header_hash: &BurnchainHeaderHash, + start_index_root: &TrieHash, + fork_id: u64, + ) -> TestBurnchainFork { + TestBurnchainFork { + start_height, + mined: 0, + tip_header_hash: start_header_hash.clone(), + tip_sortition_id: SortitionId([0x00; 32]), + tip_index_root: start_index_root.clone(), + blocks: vec![], + pending_blocks: vec![], + fork_id: fork_id, + } + } + + pub fn fork(&self) -> TestBurnchainFork { + let mut new_fork = (*self).clone(); + new_fork.fork_id += 1; + new_fork + } + + pub fn append_block(&mut self, b: TestBurnchainBlock) -> () { + self.pending_blocks.push(b); + } + + pub fn get_tip(&mut self, ic: 
&SortitionDBConn) -> BlockSnapshot { + test_debug!( + "Get tip snapshot at {} (sortition ID {})", + &self.tip_header_hash, + &self.tip_sortition_id + ); + SortitionDB::get_block_snapshot(ic, &self.tip_sortition_id) + .unwrap() + .unwrap() + } + + pub fn next_block(&mut self, ic: &SortitionDBConn) -> TestBurnchainBlock { + let fork_tip = self.get_tip(ic); + TestBurnchainBlock::new(&fork_tip, self.fork_id) + } + + pub fn mine_pending_blocks( + &mut self, + db: &mut SortitionDB, + burnchain: &Burnchain, + ) -> BlockSnapshot { + let mut snapshot = { + let ic = db.index_conn(); + self.get_tip(&ic) + }; + + for mut block in self.pending_blocks.drain(..) { + // fill in consensus hash and block hash, which we may not have known at the call + // to next_block (since we can call next_block() many times without mining blocks) + block.patch_from_chain_tip(&snapshot); + + snapshot = block.mine(db, burnchain); + + self.blocks.push(block); + self.mined += 1; + self.tip_index_root = snapshot.index_root; + self.tip_header_hash = snapshot.burn_header_hash; + self.tip_sortition_id = snapshot.sortition_id; + } + + // give back the new chain tip + snapshot + } + + pub fn mine_pending_blocks_pox< + 'a, + T: BlockEventDispatcher, + N: CoordinatorNotices, + R: RewardSetProvider, + >( + &mut self, + db: &mut SortitionDB, + burnchain: &Burnchain, + coord: &mut ChainsCoordinator<'a, T, N, R>, + ) -> BlockSnapshot { + let mut snapshot = { + let ic = db.index_conn(); + self.get_tip(&ic) + }; + + for mut block in self.pending_blocks.drain(..) 
{ + // fill in consensus hash and block hash, which we may not have known at the call + // to next_block (since we can call next_block() many times without mining blocks) + block.patch_from_chain_tip(&snapshot); + + snapshot = block.mine_pox(db, burnchain, coord); + + self.blocks.push(block); + self.mined += 1; + self.tip_index_root = snapshot.index_root; + self.tip_header_hash = snapshot.burn_header_hash; + self.tip_sortition_id = snapshot.sortition_id; + } + + // give back the new chain tip + snapshot + } +} + +impl TestBurnchainNode { + pub fn new() -> TestBurnchainNode { + let first_block_height = 100; + let first_block_hash = BurnchainHeaderHash([0u8; 32]); + let db = SortitionDB::connect_test(first_block_height, &first_block_hash).unwrap(); + TestBurnchainNode { + sortdb: db, + dirty: false, + burnchain: Burnchain::default_unittest(first_block_height, &first_block_hash), + } + } + + pub fn mine_fork(&mut self, fork: &mut TestBurnchainFork) -> BlockSnapshot { + fork.mine_pending_blocks(&mut self.sortdb, &self.burnchain) + } +} + +fn process_next_sortition( + node: &mut TestBurnchainNode, + fork: &mut TestBurnchainFork, + miners: &mut Vec, + prev_keys: &Vec, + block_hashes: &Vec, +) -> ( + BlockSnapshot, + Vec, + Vec, + Vec, +) { + assert_eq!(miners.len(), block_hashes.len()); + + let mut block = { + let ic = node.sortdb.index_conn(); + fork.next_block(&ic) + }; + + let mut next_commits = vec![]; + let mut next_prev_keys = vec![]; + + if prev_keys.len() > 0 { + assert_eq!(miners.len(), prev_keys.len()); + + // make a Stacks block (hash) for each of the prior block's keys + for j in 0..miners.len() { + let block_commit_op = { + let ic = node.sortdb.index_conn(); + let hash = block_hashes[j].clone(); + block.add_leader_block_commit( + &ic, + &mut miners[j], + &hash, + ((j + 1) as u64) * 1000, + &prev_keys[j], + None, + None, + ) + }; + next_commits.push(block_commit_op); + } + } + + // have each leader register a VRF key + for j in 0..miners.len() { + let 
key_register_op = block.add_leader_key_register(&mut miners[j]); + next_prev_keys.push(key_register_op); + } + + test_debug!("Mine {} transactions", block.txs.len()); + + fork.append_block(block); + let tip_snapshot = node.mine_fork(fork); + + // TODO: user burn support + (tip_snapshot, next_prev_keys, next_commits, vec![]) +} + +fn verify_keys_accepted(node: &mut TestBurnchainNode, prev_keys: &Vec) -> () { + // all keys accepted + for key in prev_keys.iter() { + let tx_opt = SortitionDB::get_burnchain_transaction(node.sortdb.conn(), &key.txid).unwrap(); + assert!(tx_opt.is_some()); + + let tx = tx_opt.unwrap(); + match tx { + BlockstackOperationType::LeaderKeyRegister(ref op) => { + assert_eq!(*op, *key); + } + _ => { + assert!(false); + } + } + } +} + +fn verify_commits_accepted( + node: &TestBurnchainNode, + next_block_commits: &Vec, +) -> () { + // all commits accepted + for commit in next_block_commits.iter() { + let tx_opt = + SortitionDB::get_burnchain_transaction(node.sortdb.conn(), &commit.txid).unwrap(); + assert!(tx_opt.is_some()); + + let tx = tx_opt.unwrap(); + match tx { + BlockstackOperationType::LeaderBlockCommit(ref op) => { + assert_eq!(*op, *commit); + } + _ => { + assert!(false); + } + } + } +} + +#[test] +fn mine_10_stacks_blocks_1_fork() { + let mut node = TestBurnchainNode::new(); + let mut miner_factory = TestMinerFactory::new(); + + let mut miners = vec![]; + for i in 0..10 { + miners.push(miner_factory.next_miner( + &node.burnchain, + 1, + 1, + AddressHashMode::SerializeP2PKH, + )); + } + + let first_snapshot = SortitionDB::get_first_block_snapshot(node.sortdb.conn()).unwrap(); + let mut fork = TestBurnchainFork::new( + first_snapshot.block_height, + &first_snapshot.burn_header_hash, + &first_snapshot.index_root, + 0, + ); + let mut prev_keys = vec![]; + + for i in 0..10 { + let mut next_block_hashes = vec![]; + for j in 0..miners.len() { + let hash = BlockHeaderHash([(i * 10 + j + miners.len()) as u8; 32]); + next_block_hashes.push(hash); 
+ } + + let (next_snapshot, mut next_prev_keys, next_block_commits, next_user_burns) = + process_next_sortition( + &mut node, + &mut fork, + &mut miners, + &prev_keys, + &next_block_hashes, + ); + + verify_keys_accepted(&mut node, &prev_keys); + verify_commits_accepted(&mut node, &next_block_commits); + + prev_keys.clear(); + prev_keys.append(&mut next_prev_keys); + } +} + +#[test] +fn mine_10_stacks_blocks_2_forks_disjoint() { + let mut node = TestBurnchainNode::new(); + let mut miner_factory = TestMinerFactory::new(); + + let mut miners = vec![]; + for i in 0..10 { + miners.push(miner_factory.next_miner( + &node.burnchain, + 1, + 1, + AddressHashMode::SerializeP2PKH, + )); + } + + let first_snapshot = SortitionDB::get_first_block_snapshot(node.sortdb.conn()).unwrap(); + let mut fork_1 = TestBurnchainFork::new( + first_snapshot.block_height, + &first_snapshot.burn_header_hash, + &first_snapshot.index_root, + 0, + ); + let mut prev_keys_1 = vec![]; + + // one fork for 5 blocks... + for i in 0..5 { + let mut next_block_hashes = vec![]; + for j in 0..miners.len() { + let hash = BlockHeaderHash([(i * 10 + j + miners.len()) as u8; 32]); + next_block_hashes.push(hash); + } + + let (next_snapshot, mut next_prev_keys, next_block_commits, next_user_burns) = + process_next_sortition( + &mut node, + &mut fork_1, + &mut miners, + &prev_keys_1, + &next_block_hashes, + ); + + verify_keys_accepted(&mut node, &prev_keys_1); + verify_commits_accepted(&mut node, &next_block_commits); + + prev_keys_1.clear(); + prev_keys_1.append(&mut next_prev_keys); + } + + let mut fork_2 = fork_1.fork(); + let mut prev_keys_2 = prev_keys_1[5..].to_vec(); + prev_keys_1.truncate(5); + + let mut miners_1 = vec![]; + let mut miners_2 = vec![]; + + let mut miners_drain = miners.drain(..); + for i in 0..5 { + let m = miners_drain.next().unwrap(); + miners_1.push(m); + } + for i in 0..5 { + let m = miners_drain.next().unwrap(); + miners_2.push(m); + } + + // two disjoint forks for 5 blocks... 
+ for i in 5..10 { + let mut next_block_hashes_1 = vec![]; + for j in 0..miners_1.len() { + let hash = BlockHeaderHash( + [(i * (miners_1.len() + miners_2.len()) + j + miners_1.len() + miners_2.len()) + as u8; 32], + ); + next_block_hashes_1.push(hash); + } + + let mut next_block_hashes_2 = vec![]; + for j in 0..miners_2.len() { + let hash = BlockHeaderHash( + [(i * (miners_1.len() + miners_2.len()) + (5 + j) + miners_1.len() + miners_2.len()) + as u8; 32], + ); + next_block_hashes_2.push(hash); + } + + let (next_snapshot_1, mut next_prev_keys_1, next_block_commits_1, next_user_burns_1) = + process_next_sortition( + &mut node, + &mut fork_1, + &mut miners_1, + &prev_keys_1, + &next_block_hashes_1, + ); + let (next_snapshot_2, mut next_prev_keys_2, next_block_commits_2, next_user_burns_2) = + process_next_sortition( + &mut node, + &mut fork_2, + &mut miners_2, + &prev_keys_2, + &next_block_hashes_2, + ); + + assert!(next_snapshot_1.burn_header_hash != next_snapshot_2.burn_header_hash); + + verify_keys_accepted(&mut node, &prev_keys_1); + verify_commits_accepted(&mut node, &next_block_commits_1); + + verify_keys_accepted(&mut node, &prev_keys_2); + verify_commits_accepted(&mut node, &next_block_commits_2); + + prev_keys_1.clear(); + prev_keys_1.append(&mut next_prev_keys_1); + + prev_keys_2.clear(); + prev_keys_2.append(&mut next_prev_keys_2); + } +} + +#[test] +fn mine_10_stacks_blocks_2_forks_disjoint_same_blocks() { + let mut node = TestBurnchainNode::new(); + let mut miner_factory = TestMinerFactory::new(); + + let mut miners = vec![]; + for i in 0..10 { + miners.push(miner_factory.next_miner( + &node.burnchain, + 1, + 1, + AddressHashMode::SerializeP2PKH, + )); + } + + let first_snapshot = SortitionDB::get_first_block_snapshot(node.sortdb.conn()).unwrap(); + let mut fork_1 = TestBurnchainFork::new( + first_snapshot.block_height, + &first_snapshot.burn_header_hash, + &first_snapshot.index_root, + 0, + ); + let mut prev_keys_1 = vec![]; + + // one fork for 5 
blocks... + for i in 0..5 { + let mut next_block_hashes = vec![]; + for j in 0..miners.len() { + let hash = BlockHeaderHash([(i * 10 + j + miners.len()) as u8; 32]); + next_block_hashes.push(hash); + } + + let (snapshot, mut next_prev_keys, next_block_commits, next_user_burns) = + process_next_sortition( + &mut node, + &mut fork_1, + &mut miners, + &prev_keys_1, + &next_block_hashes, + ); + + verify_keys_accepted(&mut node, &prev_keys_1); + verify_commits_accepted(&mut node, &next_block_commits); + + prev_keys_1.clear(); + prev_keys_1.append(&mut next_prev_keys); + } + + let mut fork_2 = fork_1.fork(); + let mut prev_keys_2 = prev_keys_1[5..].to_vec(); + prev_keys_1.truncate(5); + + let mut miners_1 = vec![]; + let mut miners_2 = vec![]; + + let mut miners_drain = miners.drain(..); + for i in 0..5 { + let m = miners_drain.next().unwrap(); + miners_1.push(m); + } + for i in 0..5 { + let m = miners_drain.next().unwrap(); + miners_2.push(m); + } + + // two disjoint forks for 5 blocks, but miners in each fork mine the same blocks. + // This tests that we can accept two burnchain forks that each contain the same stacks + // block history. 
+ for i in 5..10 { + let mut next_block_hashes_1 = vec![]; + for j in 0..miners_1.len() { + let hash = BlockHeaderHash( + [(i * (miners_1.len() + miners_2.len()) + j + miners_1.len() + miners_2.len()) + as u8; 32], + ); + next_block_hashes_1.push(hash); + } + + let mut next_block_hashes_2 = vec![]; + for j in 0..miners_2.len() { + let hash = BlockHeaderHash( + [(i * (miners_1.len() + miners_2.len()) + j + miners_1.len() + miners_2.len()) + as u8; 32], + ); + next_block_hashes_2.push(hash); + } + + let (snapshot_1, mut next_prev_keys_1, next_block_commits_1, next_user_burns_1) = + process_next_sortition( + &mut node, + &mut fork_1, + &mut miners_1, + &prev_keys_1, + &next_block_hashes_1, + ); + let (snapshot_2, mut next_prev_keys_2, next_block_commits_2, next_user_burns_2) = + process_next_sortition( + &mut node, + &mut fork_2, + &mut miners_2, + &prev_keys_2, + &next_block_hashes_2, + ); + + assert!(snapshot_1.burn_header_hash != snapshot_2.burn_header_hash); + assert!(snapshot_1.consensus_hash != snapshot_2.consensus_hash); + + // same blocks mined in both forks + assert_eq!(next_block_commits_1.len(), next_block_commits_2.len()); + for i in 0..next_block_commits_1.len() { + assert_eq!( + next_block_commits_1[i].block_header_hash, + next_block_commits_2[i].block_header_hash + ); + } + + verify_keys_accepted(&mut node, &prev_keys_1); + verify_commits_accepted(&mut node, &next_block_commits_1); + + verify_keys_accepted(&mut node, &prev_keys_2); + verify_commits_accepted(&mut node, &next_block_commits_2); + + prev_keys_1.clear(); + prev_keys_1.append(&mut next_prev_keys_1); + + prev_keys_2.clear(); + prev_keys_2.append(&mut next_prev_keys_2); + } +} diff --git a/src/net/mod.rs b/src/net/mod.rs index 73e4873903..7355b62b8b 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -1838,7 +1838,7 @@ pub mod test { use burnchains::bitcoin::*; use burnchains::burnchain::*; use burnchains::db::BurnchainDB; - use burnchains::test::*; + use burnchains::tests::*; use 
burnchains::*; use chainstate::burn::db::sortdb; use chainstate::burn::db::sortdb::*; From 3c8a9eb43e2c310432a75b05bbd4637900be4f5b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Jul 2021 13:57:48 -0400 Subject: [PATCH 029/116] refactor: implement get_burnchain_header() as a trait method for BurnchainHeaderReader, and use that instead of get_burnchain_headers() for reading one header --- src/burnchains/affirmation.rs | 14 +++++--------- src/burnchains/db.rs | 15 +++++++-------- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index 1842d0d222..4e9693d3a8 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -335,13 +335,10 @@ pub fn read_parent_block_commits<'a, B: BurnchainHeaderReader>( let mut parents = HashMap::new(); for ops in prepare_phase_ops.iter() { for opdata in ops.iter() { - let mut hdrs = indexer.read_burnchain_headers( - opdata.parent_block_ptr as u64, - (opdata.parent_block_ptr + 1) as u64, - )?; - let hdr = match hdrs.len() { - 1 => hdrs.pop().expect("BUG: pop() failure on non-empty vector"), - _ => { + let hdr = + if let Some(hdr) = indexer.read_burnchain_header(opdata.parent_block_ptr as u64)? 
{ + hdr + } else { test_debug!( "Orphan block commit {},{},{}: no such block {}", &opdata.txid, @@ -350,8 +347,7 @@ pub fn read_parent_block_commits<'a, B: BurnchainHeaderReader>( opdata.parent_block_ptr ); continue; - } - }; + }; test_debug!("Get header at {}: {:?}", opdata.parent_block_ptr, &hdr); assert_eq!(hdr.block_height, opdata.parent_block_ptr as u64); diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index a75317e416..717053dc95 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -64,6 +64,11 @@ pub trait BurnchainHeaderReader { end_height: u64, ) -> Result, DBError>; fn get_burnchain_headers_height(&self) -> Result; + + fn read_burnchain_header(&self, height: u64) -> Result, DBError> { + let mut hdrs = self.read_burnchain_headers(height, height.saturating_add(1))?; + Ok(hdrs.pop()) + } } const NO_ANCHOR_BLOCK: u64 = i64::MAX as u64; @@ -1300,10 +1305,7 @@ impl BurnchainDB { block_ptr: u32, vtxindex: u16, ) -> Result, DBError> { - let header_hash = match indexer - .read_burnchain_headers(block_ptr as u64, (block_ptr + 1) as u64)? - .first() - { + let header_hash = match indexer.read_burnchain_header(block_ptr as u64)? { Some(hdr) => hdr.block_hash, None => { test_debug!("No headers at height {}", block_ptr); @@ -1339,10 +1341,7 @@ impl BurnchainDB { block_ptr: u32, vtxindex: u16, ) -> Result, DBError> { - let header_hash = match indexer - .read_burnchain_headers(block_ptr as u64, (block_ptr + 1) as u64)? - .first() - { + let header_hash = match indexer.read_burnchain_header(block_ptr as u64)? 
{ Some(hdr) => hdr.block_hash, None => { test_debug!("No headers at height {}", block_ptr); From f7e52d99a32266500734d616ece9bdbb858b9ab1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Jul 2021 13:58:25 -0400 Subject: [PATCH 030/116] refactor: Stacks epochs are comparable, so for changes that are meant to apply at and beyond 2.1, use a comparison operator instead of a match {} --- src/chainstate/coordinator/mod.rs | 39 ++++++++++++++----------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 300b72e67e..c7b5656313 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -1175,29 +1175,24 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> header.block_height )); - match cur_epoch.epoch_id { - StacksEpochId::Epoch21 => { - // potentially have an anchor block, but only process the next reward cycle (and - // subsequent reward cycles) with it if the prepare-phase block-commits affirm its - // presence. This only gets checked in Stacks 2.1 or later. - - // NOTE: this mutates rc_info - if let Some(missing_anchor_block) = self - .reinterpret_affirmed_pox_anchor_block_status( - &canonical_affirmation_map, - &header, - rc_info, - )? - { - // missing this anchor block -- cannot proceed - info!("Burnchain block processing stops due to missing affirmed anchor block {}", &missing_anchor_block); - return Ok(Some(missing_anchor_block)); - } - } - _ => { - // no-op -- pre 2.1 + if cur_epoch.epoch_id >= StacksEpochId::Epoch21 { + // potentially have an anchor block, but only process the next reward cycle (and + // subsequent reward cycles) with it if the prepare-phase block-commits affirm its + // presence. This only gets checked in Stacks 2.1 or later. 
+ + // NOTE: this mutates rc_info + if let Some(missing_anchor_block) = self + .reinterpret_affirmed_pox_anchor_block_status( + &canonical_affirmation_map, + &header, + rc_info, + )? + { + // missing this anchor block -- cannot proceed + info!("Burnchain block processing stops due to missing affirmed anchor block {}", &missing_anchor_block); + return Ok(Some(missing_anchor_block)); } - }; + } test_debug!( "Reward cycle info at height {}: {:?}", From fb4d7f9f3a2b16cc32a97ebc94e8035c6bb0b2f7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 7 Jul 2021 16:59:22 -0400 Subject: [PATCH 031/116] refactor: anchor block and anchor block descendant fields may be NULL --- src/burnchains/db.rs | 58 +++++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 717053dc95..e283a5d423 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -36,8 +36,8 @@ use chainstate::burn::operations::{ use chainstate::burn::BlockSnapshot; use chainstate::stacks::index::MarfTrieId; use util::db::{ - query_row, query_row_panic, query_rows, sql_pragma, tx_begin_immediate, tx_busy_handler, - u64_to_sql, DBConn, Error as DBError, FromColumn, FromRow, + opt_u64_to_sql, query_row, query_row_panic, query_rows, sql_pragma, tx_begin_immediate, + tx_busy_handler, u64_to_sql, DBConn, Error as DBError, FromColumn, FromRow, }; use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash}; @@ -71,8 +71,6 @@ pub trait BurnchainHeaderReader { } } -const NO_ANCHOR_BLOCK: u64 = i64::MAX as u64; - #[derive(Debug, Clone)] pub struct BlockCommitMetadata { pub burn_block_hash: BurnchainHeaderHash, @@ -108,18 +106,26 @@ impl FromRow for BlockCommitMetadata { let block_height = u64::from_column(row, "block_height")?; let vtxindex: u32 = row.get_unwrap("vtxindex"); let affirmation_id = u64::from_column(row, "affirmation_id")?; - let anchor_block_u64 = u64::from_column(row, "anchor_block")?; - let 
anchor_block = if anchor_block_u64 != NO_ANCHOR_BLOCK { - Some(anchor_block_u64) - } else { - None + let anchor_block_i64: Option = row.get_unwrap("anchor_block"); + let anchor_block = match anchor_block_i64 { + Some(ab) => { + if ab < 0 { + return Err(DBError::ParseError); + } + Some(ab as u64) + } + None => None, }; - let anchor_block_descendant_u64 = u64::from_column(row, "anchor_block_descendant")?; - let anchor_block_descendant = if anchor_block_descendant_u64 != NO_ANCHOR_BLOCK { - Some(anchor_block_descendant_u64) - } else { - None + let anchor_block_descendant_i64: Option = row.get_unwrap("anchor_block_descendant"); + let anchor_block_descendant = match anchor_block_descendant_i64 { + Some(abd) => { + if abd < 0 { + return Err(DBError::ParseError); + } + Some(abd as u64) + } + None => None, }; Ok(BlockCommitMetadata { @@ -237,8 +243,8 @@ CREATE TABLE block_commit_metadata ( vtxindex INTEGER NOT NULL, affirmation_id INTEGER NOT NULL, - anchor_block INTEGER NOT NULL, - anchor_block_descendant INTEGER NOT NULL, + anchor_block INTEGER, + anchor_block_descendant INTEGER, PRIMARY KEY(burn_block_hash,txid), FOREIGN KEY(affirmation_id) REFERENCES affirmation_maps(affirmation_id), @@ -304,7 +310,7 @@ impl<'a> BurnchainDBTransaction<'a> { let sql = "UPDATE block_commit_metadata SET affirmation_id = ?1, anchor_block_descendant = ?2 WHERE burn_block_hash = ?3 AND txid = ?4"; let args: &[&dyn ToSql] = &[ &u64_to_sql(affirmation_id)?, - &u64_to_sql(anchor_block_descendant.unwrap_or(NO_ANCHOR_BLOCK))?, + &opt_u64_to_sql(anchor_block_descendant)?, &block_commit.burn_header_hash, &block_commit.txid, ]; @@ -353,7 +359,7 @@ impl<'a> BurnchainDBTransaction<'a> { pub fn clear_anchor_block(&self, reward_cycle: u64) -> Result<(), DBError> { let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE anchor_block = ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(NO_ANCHOR_BLOCK)?, &u64_to_sql(reward_cycle)?]; + let args: &[&dyn ToSql] = &[&(None as Option), 
&u64_to_sql(reward_cycle)?]; self.sql_tx .execute(sql, args) .map(|_| ()) @@ -379,8 +385,8 @@ impl<'a> BurnchainDBTransaction<'a> { let sql = "UPDATE block_commit_metadata SET affirmation_id = 0, anchor_block = ?1, anchor_block_descendant = ?2 WHERE block_height >= ?3 AND block_height < ?4"; let args: &[&dyn ToSql] = &[ - &u64_to_sql(NO_ANCHOR_BLOCK)?, - &u64_to_sql(NO_ANCHOR_BLOCK)?, + &(None as Option), + &(None as Option), &u64_to_sql(first_block_height)?, &u64_to_sql(last_block_height)?, ]; @@ -798,8 +804,8 @@ impl<'a> BurnchainDBTransaction<'a> { &bcm.txid, &u64_to_sql(bcm.block_height)?, &bcm.vtxindex, - &u64_to_sql(bcm.anchor_block.unwrap_or(NO_ANCHOR_BLOCK))?, - &u64_to_sql(bcm.anchor_block_descendant.unwrap_or(NO_ANCHOR_BLOCK))?, + &opt_u64_to_sql(bcm.anchor_block)?, + &opt_u64_to_sql(bcm.anchor_block_descendant)?, &u64_to_sql(bcm.affirmation_id)?, ]; stmt.execute(args)?; @@ -1149,7 +1155,7 @@ impl BurnchainDB { txid: &Txid, ) -> Result { let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block != ?1 AND burn_block_hash = ?2 AND txid = ?3"; - let args: &[&dyn ToSql] = &[&u64_to_sql(NO_ANCHOR_BLOCK)?, burn_header_hash, txid]; + let args: &[&dyn ToSql] = &[&(None as Option), burn_header_hash, txid]; query_row(conn, sql, args)?.ok_or(DBError::NotFoundError) } @@ -1163,10 +1169,6 @@ impl BurnchainDB { conn: &DBConn, reward_cycle: u64, ) -> Result, DBError> { - if reward_cycle == NO_ANCHOR_BLOCK { - return Ok(None); - } - let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1"; let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; let commit_metadata = match query_row::(conn, sql, args)? { @@ -1368,7 +1370,7 @@ impl BurnchainDB { FROM affirmation_maps JOIN block_commit_metadata ON affirmation_maps.affirmation_id = block_commit_metadata.affirmation_id \ WHERE block_commit_metadata.anchor_block != ?1 \ ORDER BY affirmation_maps.weight DESC, block_commit_metadata.anchor_block DESC", - &[&u64_to_sql(NO_ANCHOR_BLOCK)?] 
+ &[&(None as Option)] )? { Some(metadata) => { let commit = BurnchainDB::get_block_commit(conn, &metadata.txid)? From 1a6611eb77c1ed23ed15a38dfc622392a40aa36e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 7 Jul 2021 16:59:48 -0400 Subject: [PATCH 032/116] refactor: helper method to convert Option into Option for database storage --- src/util/db.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/util/db.rs b/src/util/db.rs index 5da77de9b8..e243e21365 100644 --- a/src/util/db.rs +++ b/src/util/db.rs @@ -217,6 +217,16 @@ pub fn u64_to_sql(x: u64) -> Result { Ok(x as i64) } +pub fn opt_u64_to_sql(x: Option) -> Result, Error> { + match x { + Some(x) => match u64_to_sql(x) { + Ok(x) => Ok(Some(x)), + Err(e) => Err(e), + }, + None => Ok(None), + } +} + macro_rules! impl_byte_array_from_column { ($thing:ident) => { impl rusqlite::types::FromSql for $thing { From 3990b12764682cc423f6fbd2fb876f6754f6a9f3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 7 Jul 2021 18:37:15 -0400 Subject: [PATCH 033/116] fix: use IS NULL instead of != NULL when appropriate -- don't just blindly pass in None (surprisingly, this has a different meaning) --- src/burnchains/db.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index e283a5d423..6a07749604 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -233,7 +233,7 @@ CREATE INDEX affirmation_maps_index ON affirmation_maps(affirmation_map); -- ensure anchor block uniqueness CREATE TABLE anchor_blocks ( - reward_cycle INTEGER PRIMARY KEY -- will be i64::MAX if absent + reward_cycle INTEGER PRIMARY KEY ); CREATE TABLE block_commit_metadata ( @@ -260,7 +260,6 @@ CREATE TABLE overrides ( CREATE TABLE db_config(version TEXT NOT NULL); INSERT INTO affirmation_maps(affirmation_id,weight,affirmation_map) VALUES (0,0,""); -- empty affirmation map -INSERT INTO anchor_blocks(reward_cycle) VALUES (9223372036854775807); -- 
non-existant reward cycle (i64::MAX) "#; impl<'a> BurnchainDBTransaction<'a> { @@ -358,8 +357,8 @@ impl<'a> BurnchainDBTransaction<'a> { } pub fn clear_anchor_block(&self, reward_cycle: u64) -> Result<(), DBError> { - let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE anchor_block = ?2"; - let args: &[&dyn ToSql] = &[&(None as Option), &u64_to_sql(reward_cycle)?]; + let sql = "UPDATE block_commit_metadata SET anchor_block = NULL WHERE anchor_block = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; self.sql_tx .execute(sql, args) .map(|_| ()) @@ -383,10 +382,8 @@ impl<'a> BurnchainDBTransaction<'a> { last_block_height ); - let sql = "UPDATE block_commit_metadata SET affirmation_id = 0, anchor_block = ?1, anchor_block_descendant = ?2 WHERE block_height >= ?3 AND block_height < ?4"; + let sql = "UPDATE block_commit_metadata SET affirmation_id = 0, anchor_block = NULL, anchor_block_descendant = NULL WHERE block_height >= ?1 AND block_height < ?2"; let args: &[&dyn ToSql] = &[ - &(None as Option), - &(None as Option), &u64_to_sql(first_block_height)?, &u64_to_sql(last_block_height)?, ]; @@ -1154,8 +1151,8 @@ impl BurnchainDB { burn_header_hash: &BurnchainHeaderHash, txid: &Txid, ) -> Result { - let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block != ?1 AND burn_block_hash = ?2 AND txid = ?3"; - let args: &[&dyn ToSql] = &[&(None as Option), burn_header_hash, txid]; + let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block IS NOT NULL AND burn_block_hash = ?1 AND txid = ?2"; + let args: &[&dyn ToSql] = &[burn_header_hash, txid]; query_row(conn, sql, args)?.ok_or(DBError::NotFoundError) } @@ -1368,9 +1365,9 @@ impl BurnchainDB { match query_row::( conn, "SELECT block_commit_metadata.* \ FROM affirmation_maps JOIN block_commit_metadata ON affirmation_maps.affirmation_id = block_commit_metadata.affirmation_id \ - WHERE block_commit_metadata.anchor_block != ?1 \ + WHERE block_commit_metadata.anchor_block IS NOT NULL \ 
ORDER BY affirmation_maps.weight DESC, block_commit_metadata.anchor_block DESC", - &[&(None as Option)] + NO_PARAMS )? { Some(metadata) => { let commit = BurnchainDB::get_block_commit(conn, &metadata.txid)? From ce59cce556d78f76a0434cd3d57def9479203a42 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 7 Jul 2021 18:37:50 -0400 Subject: [PATCH 034/116] fix: update test APIs from next merger --- src/burnchains/tests/mod.rs | 2 +- src/chainstate/coordinator/tests.rs | 2 +- src/net/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/burnchains/tests/mod.rs b/src/burnchains/tests/mod.rs index 4bf749a0c7..f1893cb780 100644 --- a/src/burnchains/tests/mod.rs +++ b/src/burnchains/tests/mod.rs @@ -593,7 +593,7 @@ impl TestBurnchainBlock { ); let header = block.header(); - let indexer: BitcoinIndexer = burnchain.make_indexer().unwrap(); + let indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); let mut burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); burnchain_db diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 0da002ddd5..1464c7759e 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -173,7 +173,7 @@ fn produce_burn_block_do_not_set_height<'a, I: Iterator Date: Wed, 7 Jul 2021 18:38:09 -0400 Subject: [PATCH 035/116] fix: cargo fmt update --- src/vm/ast/definition_sorter/mod.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/vm/ast/definition_sorter/mod.rs b/src/vm/ast/definition_sorter/mod.rs index 59a0904dbc..5be8ef12ba 100644 --- a/src/vm/ast/definition_sorter/mod.rs +++ b/src/vm/ast/definition_sorter/mod.rs @@ -213,12 +213,11 @@ impl<'a> DefinitionSorter { } } else if let Some(native_function) = // NOTE: can use ClarityVersion::latest() here only as long as NO NEW FUNCTIONS are special cased - // in the definition sorter. + // in the definition sorter. 
NativeFunctions::lookup_by_name_at_version( - function_name, - &ClarityVersion::latest(), - ) - { + function_name, + &ClarityVersion::latest(), + ) { match native_function { NativeFunctions::ContractCall => { // Args: [contract-name, function-name, ...]: ignore contract-name, function-name, handle rest From 1a5db40839ebe522623506c14e6ea428124bce74 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Jul 2021 12:49:45 -0400 Subject: [PATCH 036/116] fix: tests: instantiate spv headers so unit tests continue to pass --- src/burnchains/bitcoin/indexer.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index c04f2f5743..d97fc7a35a 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -180,7 +180,23 @@ impl BitcoinIndexer { #[cfg(test)] pub fn new_unit_test(working_dir: &str) -> BitcoinIndexer { let mut working_dir_path = PathBuf::from(working_dir); + if fs::metadata(&working_dir_path).is_err() { + fs::create_dir_all(&working_dir_path).unwrap(); + } + working_dir_path.push("headers.sqlite"); + + // instantiate headers DB + let _ = SpvClient::new( + &working_dir_path.to_str().unwrap().to_string(), + 0, + None, + BitcoinNetworkType::Regtest, + true, + false, + ) + .unwrap(); + BitcoinIndexer { config: BitcoinIndexerConfig::default_regtest( working_dir_path.to_str().unwrap().to_string(), From f80f240eff002298d677f2e03df220bf407ffe60 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Jul 2021 13:01:15 -0400 Subject: [PATCH 037/116] cargo fmt --- src/vm/ast/definition_sorter/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vm/ast/definition_sorter/mod.rs b/src/vm/ast/definition_sorter/mod.rs index 5be8ef12ba..8842a51834 100644 --- a/src/vm/ast/definition_sorter/mod.rs +++ b/src/vm/ast/definition_sorter/mod.rs @@ -213,7 +213,7 @@ impl<'a> DefinitionSorter { } } else if let Some(native_function) = // NOTE: can use 
ClarityVersion::latest() here only as long as NO NEW FUNCTIONS are special cased - // in the definition sorter. + // in the definition sorter. NativeFunctions::lookup_by_name_at_version( function_name, &ClarityVersion::latest(), From bf3ca85121cbbe5a89f3b9d675b86f9658f30c79 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Jul 2021 15:11:08 -0400 Subject: [PATCH 038/116] fix: more cargo fmt insanity --- src/vm/ast/definition_sorter/mod.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/vm/ast/definition_sorter/mod.rs b/src/vm/ast/definition_sorter/mod.rs index 8842a51834..2d144ea981 100644 --- a/src/vm/ast/definition_sorter/mod.rs +++ b/src/vm/ast/definition_sorter/mod.rs @@ -211,13 +211,15 @@ impl<'a> DefinitionSorter { return Ok(()); } } - } else if let Some(native_function) = - // NOTE: can use ClarityVersion::latest() here only as long as NO NEW FUNCTIONS are special cased - // in the definition sorter. + } + // NOTE: can use ClarityVersion::latest() here only as long as NO NEW FUNCTIONS are special cased + // in the definition sorter. 
+ else if let Some(native_function) = NativeFunctions::lookup_by_name_at_version( - function_name, - &ClarityVersion::latest(), - ) { + function_name, + &ClarityVersion::latest(), + ) + { match native_function { NativeFunctions::ContractCall => { // Args: [contract-name, function-name, ...]: ignore contract-name, function-name, handle rest From 9518d50f254efdc9eabfcc01be8d224aa4b5e268 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 18:05:18 -0400 Subject: [PATCH 039/116] fix: more documentation --- src/burnchains/affirmation.rs | 46 ++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index 4e9693d3a8..cacfff93f4 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -66,6 +66,8 @@ impl AffirmationMapEntry { } } +/// An affirmation map is simply a list of affirmation map entries. This struct merely wraps the +/// list behind accessor and mutator methods. #[derive(Clone, PartialEq)] pub struct AffirmationMap { affirmations: Vec, @@ -115,8 +117,8 @@ impl AffirmationMap { } } - pub fn at(&self, reward_cycle: u64) -> Option { - self.affirmations.get(reward_cycle as usize).cloned() + pub fn at(&self, reward_cycle: u64) -> Option<&AffirmationMapEntry> { + self.affirmations.get(reward_cycle as usize) } pub fn push(&mut self, entry: AffirmationMapEntry) { @@ -163,6 +165,7 @@ impl AffirmationMap { /// If `other` contains a reward cycle affirmation that is not present in `self`, then yes. /// (Note that this means that if `other` is a prefix of `self`, then no divergence). /// Return the index into `other` where the affirmation differs from `self`. + /// Return `None` if no difference exists. 
pub fn find_divergence(&self, other: &AffirmationMap) -> Option { for i in 0..cmp::min(self.len(), other.len()) { if self.affirmations[i] != other.affirmations[i] { @@ -200,7 +203,7 @@ impl AffirmationMap { } /// What is the weight of this affirmation map? - /// i.e. how many times did the network either affirm an anchor block, or made no election? + /// i.e. how many times did the network either affirm an anchor block, or make no election? pub fn weight(&self) -> u64 { let mut weight = 0; for i in 0..self.len() { @@ -216,7 +219,8 @@ impl AffirmationMap { } /// Get a parent/child reward cycle. Only return Some(..) if the reward cycle is known for both -- -/// i.e. their block heights are plausible. +/// i.e. their block heights are plausible -- they are at or after the first burnchain block +/// height. pub fn get_parent_child_reward_cycles( parent: &LeaderBlockCommitOp, block_commit: &LeaderBlockCommitOp, @@ -251,7 +255,10 @@ pub fn get_parent_child_reward_cycles( } /// Read a range of blockstack operations for a prepare phase of a given reward cycle. -/// Only includes block-commits +/// Only includes block-commits. +/// The returned vec is a vec of vecs of block-commits in block order. The ith item is a vec of +/// block-commits in block order for the ith prepare-phase block (item 0 is the first prepare-phase +/// block's block-commits). 
pub fn read_prepare_phase_commits<'a, B: BurnchainHeaderReader>( burnchain_tx: &BurnchainDBTransaction<'a>, indexer: &B, @@ -259,6 +266,7 @@ pub fn read_prepare_phase_commits<'a, B: BurnchainHeaderReader>( first_block_height: u64, reward_cycle: u64, ) -> Result>, Error> { + // start and end heights of the prepare phase for this reward cycle let start_height = pox_consts .reward_cycle_to_block_height(first_block_height, reward_cycle + 1) - (pox_consts.prepare_length as u64); @@ -306,11 +314,11 @@ pub fn read_prepare_phase_commits<'a, B: BurnchainHeaderReader>( } } block_ops.sort_by(|op1, op2| { - if op1.block_height != op2.block_height { - op1.block_height.cmp(&op2.block_height) - } else { - op1.vtxindex.cmp(&op2.vtxindex) - } + assert_eq!( + op1.block_height, op2.block_height, + "BUG: block loaded ops from a different block height" + ); + op1.vtxindex.cmp(&op2.vtxindex) }); ret.push(block_ops); } @@ -444,7 +452,7 @@ pub fn filter_orphan_block_commits( } /// Given a list of prepare-phase block-commits, filter out the ones that don't have correct burn -/// modulii. +/// modulii. This means that late block-commits don't count as confirmations. 
pub fn filter_missed_block_commits( prepare_phase_ops: Vec>, ) -> Vec> { @@ -487,11 +495,11 @@ pub fn filter_missed_block_commits( pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( burnchain_tx: &BurnchainDBTransaction<'a>, indexer: &B, - prepare_ops: &Vec>, + prepare_phase_ops: &Vec>, anchor_threshold: u32, ) -> Result>, u64, u64)>, DBError> { // sanity check -- must be in order by block height and vtxindex - for prepare_block_ops in prepare_ops.iter() { + for prepare_block_ops in prepare_phase_ops.iter() { let mut expected_block_height = None; let mut last_vtxindex = None; for opdata in prepare_block_ops.iter() { @@ -527,7 +535,7 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( let mut ancestor_confirmations: BTreeMap<(u64, u32), (HashSet, u64)> = BTreeMap::new(); // calculate each block-commit's parents - for prepare_block_ops in prepare_ops.iter() { + for prepare_block_ops in prepare_phase_ops.iter() { for opdata in prepare_block_ops.iter() { parents.insert( (opdata.block_height, opdata.vtxindex), @@ -540,7 +548,7 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( } // calculate the ancestor map -- find the highest non-prepare-phase ancestor for each prepare-phase block-commit. 
- for prepare_block_ops in prepare_ops.iter().rev() { + for prepare_block_ops in prepare_phase_ops.iter().rev() { for opdata in prepare_block_ops.iter() { let mut cursor = (opdata.block_height, opdata.vtxindex); while let Some((parent_block, parent_vtxindex)) = parents.get(&cursor) { @@ -552,7 +560,7 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( // calculate the ancestor confirmations -- figure out how many distinct blocks contain // block-commits that descend from each pre-prepare-phase ancestor - for prepare_block_ops in prepare_ops.iter() { + for prepare_block_ops in prepare_phase_ops.iter() { for opdata in prepare_block_ops.iter() { if let Some((ancestor_height, ancestor_vtxindex)) = ancestors.get(&(opdata.block_height, opdata.vtxindex)) @@ -650,9 +658,9 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( // anchor block as the most_burnt. let mut burn_count = 0; - let mut descendancy = Vec::with_capacity(prepare_ops.len()); - for prepare_block_ops in prepare_ops.iter() { - let mut block_descendancy = Vec::with_capacity(prepare_ops.len()); + let mut descendancy = Vec::with_capacity(prepare_phase_ops.len()); + for prepare_block_ops in prepare_phase_ops.iter() { + let mut block_descendancy = Vec::with_capacity(prepare_phase_ops.len()); let mut found_conf = false; for opdata in prepare_block_ops.iter() { if let Some((op_ancestor_height, op_ancestor_vtxindex, ..)) = From 711ab33d2a441b7e9d1f562ff43ef44e95dbee56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 18:05:46 -0400 Subject: [PATCH 040/116] fix: more documentation --- src/burnchains/db.rs | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 6a07749604..55efc50111 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -207,62 +207,92 @@ pub const BURNCHAIN_DB_VERSION: &'static str = "2"; const BURNCHAIN_DB_SCHEMA: &'static str = r#" CREATE 
TABLE burnchain_db_block_headers ( + -- height of the block (non-negative) block_height INTEGER NOT NULL, + -- 32-byte hash of the block block_hash TEXT UNIQUE NOT NULL, + -- 32-byte hash of this block's parent block parent_block_hash TEXT NOT NULL, + -- number of transactions in this block num_txs INTEGER NOT NULL, + -- Unix timestamp at which this block was mined timestamp INTEGER NOT NULL, PRIMARY KEY(block_hash) ); CREATE TABLE burnchain_db_block_ops ( + -- 32-byte hash of the block that contains this parsed operation block_hash TEXT NOT NULL, + -- opaque serialized operation (e.g. a JSON string) op TEXT NOT NULL, + -- 32-byte transaction ID txid TEXT NOT NULL, + -- ensure that the operation corresponds to an actual block FOREIGN KEY(block_hash) REFERENCES burnchain_db_block_headers(block_hash) ); CREATE TABLE affirmation_maps ( + -- unique ID of this affirmation map affirmation_id INTEGER PRIMARY KEY AUTOINCREMENT, + -- the weight of this affirmation map. "weight" is the number of affirmed anchor blocks weight INTEGER NOT NULL, + -- the affirmation map itself (this is a serialized AffirmationMap) affirmation_map TEXT NOT NULL ); CREATE INDEX affirmation_maps_index ON affirmation_maps(affirmation_map); -- ensure anchor block uniqueness CREATE TABLE anchor_blocks ( + -- the nonnegative reward cycle number reward_cycle INTEGER PRIMARY KEY ); CREATE TABLE block_commit_metadata ( + -- 32-byte hash of the burnchain block that contains this block-commit burn_block_hash TEXT NOT NULL, + -- 32-byte hash of the transaction that contains this block-commit txid TEXT NOT NULL, + -- height of the burnchain block in which this block-commit can be found block_height INTEGER NOT NULL, + -- index into the list of transactions in this block at which this block-commit can be found vtxindex INTEGER NOT NULL, + -- ID of this block-commit's affirmation map affirmation_id INTEGER NOT NULL, + -- if not NULL, this block-commit is an anchor block, and this value is the reward cycle for
which it is an anchor block anchor_block INTEGER, + -- if not NULL, this block-commit occurs in a reward cycle with an anchor block, *and* this block-commit descends from the anchor block. + -- this value will contain the reward cycle ID. anchor_block_descendant INTEGER, + -- since the burnchain can fork, and since the same transaction can get mined in both forks, ensure global uniqueness PRIMARY KEY(burn_block_hash,txid), + -- make sure the affirmation map exists for this block-commit FOREIGN KEY(affirmation_id) REFERENCES affirmation_maps(affirmation_id), + -- if this block-commit is an anchor block, make sure it corresponds to exactly one reward cycle. FOREIGN KEY(anchor_block) REFERENCES anchor_blocks(reward_cycle) ); --- override the canonical affirmation map at the operator's discression +-- override the canonical affirmation map at the operator's discression. +-- set values in this table only in an emergency -- such as when a hidden anchor block was mined, and the operator +-- wants to avoid a deep Stacks blockchain reorg that would arise if the hidden anchor block was later disclosed. CREATE TABLE overrides ( reward_cycle INTEGER PRIMARY KEY NOT NULL, affirmation_map TEXT NOT NULL ); +-- database version CREATE TABLE db_config(version TEXT NOT NULL); -INSERT INTO affirmation_maps(affirmation_id,weight,affirmation_map) VALUES (0,0,""); -- empty affirmation map +-- empty affirmation map always exists, so foreign key relationships work +INSERT INTO affirmation_maps(affirmation_id,weight,affirmation_map) VALUES (0,0,""); "#; impl<'a> BurnchainDBTransaction<'a> { + /// Store a burnchain block header into the burnchain database. + /// Returns the row ID on success. fn store_burnchain_db_entry( &self, header: &BurnchainBlockHeader, @@ -283,6 +313,7 @@ impl<'a> BurnchainDBTransaction<'a> { } } + /// Add an affirmation map into the database. Returns the affirmation map ID. 
fn insert_block_commit_affirmation_map( &self, affirmation_map: &AffirmationMap, @@ -300,6 +331,9 @@ impl<'a> BurnchainDBTransaction<'a> { } } + /// Update a block-commit's affirmation state -- namely, record the reward cycle that this + /// block-commit affirms, if any (anchor_block_descendant), and record the affirmation map ID + /// for this block-commit (affirmation_id). fn update_block_commit_affirmation( &self, block_commit: &LeaderBlockCommitOp, @@ -323,6 +357,7 @@ impl<'a> BurnchainDBTransaction<'a> { } } + /// Mark a block-commit as being the anchor block commit for a particular reward cycle. pub fn set_anchor_block( &self, block_commit: &LeaderBlockCommitOp, @@ -356,6 +391,7 @@ impl<'a> BurnchainDBTransaction<'a> { } } + /// Unmark all block-commit(s) that were anchor block(s) for this reward cycle. pub fn clear_anchor_block(&self, reward_cycle: u64) -> Result<(), DBError> { let sql = "UPDATE block_commit_metadata SET anchor_block = NULL WHERE anchor_block = ?1"; let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; From ba937b71dbae9a085ddba43adc5abd1ca0c214cc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 18:06:00 -0400 Subject: [PATCH 041/116] fix: more test documentation --- src/burnchains/tests/affirmation.rs | 101 +++++++++++++++++++++++----- 1 file changed, 85 insertions(+), 16 deletions(-) diff --git a/src/burnchains/tests/affirmation.rs b/src/burnchains/tests/affirmation.rs index e7a2d7ed66..ae3ed4f4d6 100644 --- a/src/burnchains/tests/affirmation.rs +++ b/src/burnchains/tests/affirmation.rs @@ -169,6 +169,28 @@ fn make_simple_key_register( } } +/// Create a mock reward cycle with a particular anchor block vote outcome -- it either confirms or +/// does not confirm an anchor block. The method returns the data for all new mocked blocks +/// created -- it returns the list of new block headers, and for each new block, it returns the +/// list of block-commits created (if any). 
In addition, the `headers` argument will be grown to +/// include the new block-headers (so that a succession of calls to this method will grow the given +/// headers argument). The list of headers returned (first tuple item) is in 1-to-1 correspondence +/// with the list of lists of block-commits returned (second tuple item). If the ith item in +/// parent_commits is None, then all the block-commits in the ith list of lists of block-commits +/// will be None. +/// +/// The caller can control how many block-commits get produced per block with the `parent_commits` +/// argument. If parent_commits[i] is Some(..), then a sequence of block-commits will be produced +/// that descend from it. +/// +/// If `confirm_anchor_block` is true, then the prepare-phase of the reward cycle will confirm an +/// anchor block -- there will be sufficiently many confirmations placed on a block-commit in the +/// reward phase. Otherwise, enough prepare-phase blocks will be missing block-commits that no +/// anchor block is selected. +/// +/// All block-commits produced reference the given miner key (given in the `key` argument). All +/// block-commits created, as well as all block headers, will be stored to the given burnchain +/// database (in addition to being returned). pub fn make_reward_cycle_with_vote( burnchain_db: &mut BurnchainDB, burnchain: &Burnchain, @@ -330,6 +352,9 @@ pub fn make_reward_cycle_with_vote( (new_headers, new_commits) } +/// Convenience wrapper that produces a reward cycle with one sequence of block-commits. Returns +/// the sequence of block headers in this reward cycle, and the list of block-commits created. If +/// parent_commit is None, then the list of block-commits will contain all None's.
fn make_simple_reward_cycle( burnchain_db: &mut BurnchainDB, burnchain: &Burnchain, @@ -348,6 +373,9 @@ fn make_simple_reward_cycle( ) } +/// Convenience wrapper that produces a reward cycle with zero or more sequences of block-commits, +/// such that an anchor block-commit is chosen. +/// Returns the list of new block headers and each blocks' commits. pub fn make_reward_cycle( burnchain_db: &mut BurnchainDB, burnchain: &Burnchain, @@ -361,6 +389,9 @@ pub fn make_reward_cycle( make_reward_cycle_with_vote(burnchain_db, burnchain, key, headers, parent_commits, true) } +/// Convenience wrapper that produces a reward cycle with zero or more sequences of block-commits, +/// such that no anchor block-commit is chosen. +/// Returns the list of new block headers and each blocks' commits. pub fn make_reward_cycle_without_anchor( burnchain_db: &mut BurnchainDB, burnchain: &Burnchain, @@ -392,12 +423,7 @@ fn test_read_prepare_phase_commits() { assert_eq!(&first_block_header.block_hash, &first_bhh); assert_eq!(first_block_header.block_height, first_height); assert_eq!(first_block_header.timestamp, first_timestamp as u64); - /* - assert_eq!( - &first_block_header.parent_block_hash, - &BurnchainHeaderHash::sentinel() - ); - */ + eprintln!( "First block parent is {}", &first_block_header.parent_block_hash @@ -868,6 +894,8 @@ fn test_find_heaviest_block_commit() { #[test] fn test_find_heaviest_parent_commit_many_commits() { + // Test finding parent block commits when there's multiple block-commit forks to choose from. + // This tests the tie-breaking logic. let first_bhh = BurnchainHeaderHash([0; 32]); let first_timestamp = 0; let first_height = 0; @@ -970,8 +998,9 @@ fn test_find_heaviest_parent_commit_many_commits() { assert_eq!(total_confs, 3); assert_eq!(total_burns, 1 + 1 + 2 + 2 + 3 + 3); - // make a history with two miners' commits, with some invalid commits. - // The heavier commit descendancy wins -- 2,1 is the anchor block. 
+ // make a history with two miners' commits + // both histories have the same number of confirmations. + // one history represents more BTC than the other. // 1,0 <-- 2,0 <--- 3,0 <--- 4,0 <--- 5,0 (winner) // \ // `---- 2,1 <--- 3,1 <--- 4,1 <--- 5,1 @@ -1001,7 +1030,7 @@ fn test_find_heaviest_parent_commit_many_commits() { all_ops_no_majority[1][1].vtxindex = 1; all_ops_no_majority[1][1].burn_fee = 2; - // 5,0 + // 5,0 -- slightly heavier than 5,1 all_ops_no_majority[2][0].parent_block_ptr = 4; all_ops_no_majority[2][0].parent_vtxindex = 0; all_ops_no_majority[2][0].vtxindex = 0; @@ -1024,11 +1053,14 @@ fn test_find_heaviest_parent_commit_many_commits() { let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = heaviest_parent_commit_opt.unwrap(); - // best option wins + // either 2,0 or 2,1 is the anchor block, but we break ties in part by weight. + // 5,0 is heavier than 5,1, so 2,0 wins assert_eq!( commits[1][0].as_ref().unwrap(), &heaviest_parent_block_commit ); + // prepare-phase commits x,0 all descend from the anchor block. + // prepare-phase commits x,1 do not. assert_eq!( descendancy, vec![vec![true, false], vec![true, false], vec![true, false]] @@ -1036,8 +1068,9 @@ fn test_find_heaviest_parent_commit_many_commits() { assert_eq!(total_confs, 3); assert_eq!(total_burns, 1 + 2 + 4); - // make a history with two miners' commits, with some invalid commits. - // commit descendancy weight is a tie, so highest commit is the anchor block (2,1) + // make a history with two miners' commits + // both histories have the same amount of confirmations and BTC burnt. + // select the anchor block with the latest confirmation to break ties. 
// 1,0 <-- 2,0 <--- 3,0 <--- 4,0 <--- 5,0 // \ // `---- 2,1 <--- 3,1 <--- 4,1 <--- 5,1 (winner) @@ -1073,7 +1106,7 @@ fn test_find_heaviest_parent_commit_many_commits() { all_ops_no_majority[2][0].vtxindex = 0; all_ops_no_majority[2][0].burn_fee = 3; - // 5,1 + // 5,1 -- same BTC overall as the history ending at 5,0, but occurs later in the blockchain all_ops_no_majority[2][1].parent_block_ptr = 4; all_ops_no_majority[2][1].parent_vtxindex = 1; all_ops_no_majority[2][1].vtxindex = 1; @@ -1090,11 +1123,14 @@ fn test_find_heaviest_parent_commit_many_commits() { let (heaviest_parent_block_commit, descendancy, total_confs, total_burns) = heaviest_parent_commit_opt.unwrap(); - // best option wins + // number of confirmations and BTC amount are the same in the two fork histories, so break ties + // by choosing the anchor block confirmed by the latest commit. assert_eq!( commits[1][1].as_ref().unwrap(), &heaviest_parent_block_commit ); + // prepare-phase commits x,0 do not descend from an anchor block + // prepare-phase commits x,1 do assert_eq!( descendancy, vec![vec![false, true], vec![false, true], vec![false, true]] @@ -1105,6 +1141,13 @@ fn test_find_heaviest_parent_commit_many_commits() { #[test] fn test_update_pox_affirmation_maps_3_forks() { + // Create three forks, such that each subsequent reward cycle only affirms the first reward cycle's anchor + // block. That is, reward cycle 2 affirms reward cycle 1's anchor block; reward cycle 3 + // affirms reward cycle 1's anchor block but not 2's, and reward cycle 4 affirms reward cycle + // 1's anchor block but not 2's or 3's. Each affirmation map has the same weight, but verify + // that the canonical affirmation map is the *last-discovered* affirmation map (i.e. the one + // with the highest affirmed anchor block -- in this case, the fork in which reward cycle 4 + // affirms reward cycle 1's anchor block, but not 2's or 3's). 
let first_bhh = BurnchainHeaderHash([0; 32]);
 let first_timestamp = 0;
 let first_height = 0;
@@ -1155,7 +1198,7 @@ fn test_update_pox_affirmation_maps_3_forks() {
 
 update_pox_affirmation_maps(&mut burnchain_db, &headers, 0, &burnchain).unwrap();
 
- // there's only one anchor block
+ // there's only one anchor block in the chain so far
 assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0)
 .unwrap()
 .is_none());
@@ -1195,7 +1238,7 @@ fn test_update_pox_affirmation_maps_3_forks() {
 );
 update_pox_affirmation_maps(&mut burnchain_db, &headers, 1, &burnchain).unwrap();
 
- // there's two anchor blocks
+ // there are two anchor blocks so far -- one for reward cycle 1, and one for reward cycle 2.
 assert!(BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 0)
 .unwrap()
 .is_none());
@@ -1293,6 +1336,9 @@ fn test_update_pox_affirmation_maps_3_forks() {
 
 #[test]
 fn test_update_pox_affirmation_maps_unique_anchor_block() {
+ // Verify that if two reward cycles choose the same anchor block, the second reward cycle to do
+ // so will actually have no anchor block at all (since a block-commit can be an anchor block
+ // for at most one reward cycle).
 let first_bhh = BurnchainHeaderHash([0; 32]);
 let first_timestamp = 0;
 let first_height = 0;
@@ -1442,6 +1488,13 @@ fn test_update_pox_affirmation_maps_unique_anchor_block() {
 
 #[test]
 fn test_update_pox_affirmation_maps_absent() {
+ // Create two fork histories, both of which affirm the *absence* of different anchor blocks,
+ // and both of which contain stretches of reward cycles in which no anchor block was chosen.
+ // Verify that an affirmation map becomes canonical only by affirming the *presence* of more
+ // anchor blocks than others -- i.e. affirmation maps that grow by adding reward cycles in
+ // which there was no anchor block chosen do *not* increase in weight (and thus the canonical
+ // affirmation map does *not* change even though multiple reward cycles pass with no anchor
+ // block chosen).
let first_bhh = BurnchainHeaderHash([0; 32]); let first_timestamp = 0; let first_height = 0; @@ -1794,6 +1847,12 @@ fn test_update_pox_affirmation_maps_absent() { #[test] fn test_update_pox_affirmation_maps_nothing() { + // Create a sequence of reward cycles that alternate between selecting (and affirming) an + // anchor block, and not selecting an anchor block at all. Verify that in all cases the + // canonical affirmation map is still the affirmation map with the most affirmed anchor blocks + // (`pn`), and verify that the heaviest affirmation map (given the unconfirmed anchor block oracle + // closure) can alternate between either `pnpn` or `pnan` based on whether or not the oracle + // declares an anchor block present or absent in the chain state. let first_bhh = BurnchainHeaderHash([0; 32]); let first_timestamp = 0; let first_height = 0; @@ -1992,6 +2051,12 @@ fn test_update_pox_affirmation_maps_nothing() { #[test] fn test_update_pox_affirmation_fork_2_cycles() { + // Create two forks, where miners work on each fork for two cycles (so, there are four reward + // cycles in total, but miners spend the first two reward cycles on fork 1 and the next two + // reward cycles on fork 2). The second fork does NOT affirm the anchor blocks in the first + // fork. Verify that the canonical affirmation map progresses from `paa` to `aap` once the + // second fork affirms two anchor blocks (note that ties in affirmation map weights are broken + // by most-recently-affirmed anchor block). let first_bhh = BurnchainHeaderHash([0; 32]); let first_timestamp = 0; let first_height = 0; @@ -2204,6 +2269,10 @@ fn test_update_pox_affirmation_fork_2_cycles() { #[test] fn test_update_pox_affirmation_fork_duel() { + // Create two forks where miners alternate between working on forks (i.e. selecting anchor + // blocks) at each reward cycle. That is, in odd reward cycles, miners work on fork #1, and in + // even reward cycles, they work on fork #2. 
Verify that the canonical affirmation map + // flip-flops between that of fork #1 and fork #2 as anchor blocks are subsequently affirmed. let first_bhh = BurnchainHeaderHash([0; 32]); let first_timestamp = 0; let first_height = 0; From 458099b94e651ee432f2b03c5556368d52e69010 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 18:07:13 -0400 Subject: [PATCH 042/116] refactor: remove dead code --- src/burnchains/tests/burnchain.rs | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/src/burnchains/tests/burnchain.rs b/src/burnchains/tests/burnchain.rs index 632bbd9e79..35337f5a8c 100644 --- a/src/burnchains/tests/burnchain.rs +++ b/src/burnchains/tests/burnchain.rs @@ -839,28 +839,6 @@ fn test_process_block_ops() { }; assert_eq!(sn124, block_124_snapshot); - - // get all winning block commit hashes. - // There should only be two -- the winning block at height 124, and the genesis - // sentinel block hash. This is because epochs 121, 122, and 123 don't have any block - // commits. - let expected_winning_hashes = vec![ - BlockHeaderHash([0u8; 32]), - block_124_winners[scenario_idx].block_header_hash.clone(), - ]; - - // TODO: pair up with stacks chain state? 
- /* - let winning_header_hashes = { - let mut tx = db.tx_begin().unwrap(); - BurnDB::get_stacks_block_header_inventory(&mut tx, 124).unwrap() - .iter() - .map(|ref hinv| hinv.0.clone()) - .collect() - }; - - assert_eq!(expected_winning_hashes, winning_header_hashes); - */ } } From cd4d56dae5d6f183b6fb8c109cea51455af8ed49 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 18:07:29 -0400 Subject: [PATCH 043/116] fix: more documentation --- src/chainstate/coordinator/mod.rs | 58 +++++++++++++++++++++++++------ 1 file changed, 48 insertions(+), 10 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 1c17433219..4fa5c438b3 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -600,6 +600,48 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> Ok(ret) } + /// Compare the coordinator's heaviest affirmation map to the heaviest affirmation map in the + /// burnchain DB. If they are different, then invalidate all sortitions not represented on + /// the coordinator's heaviest affirmation map that are now represented by the burnchain DB's + /// heaviest affirmation map. + /// + /// Care must be taken to ensure that a sortition that was already created, but invalidated, is + /// not re-created. This can happen if the affirmation map flaps, causing a sortition that was + /// created and invalidated to become valid again. The code here addresses this by considering + /// three ranges of sortitions (grouped by reward cycle) when processing a new heaviest + /// affirmation map: + /// + /// * The range of sortitions that are valid in both affirmation maps. These sortitions + /// correspond to the affirmation maps' common prefix. + /// * The range of sortitions that exists and are invalid on the coordinator's current + /// affirmation map, but are valid on the new heaviest affirmation map. 
These sortitions
+ /// come strictly after the common prefix, and are identified by the variables
+ /// `first_invalidate_start_block` and `last_invalidate_start_block` (which identify their lowest
+ /// and highest block heights).
+ /// * The range of sortitions that are currently valid, and need to be invalidated. This range
+ /// comes strictly after the aforementioned previously-invalid-but-now-valid sortition range.
+ ///
+ /// The code does not modify any sortition state for the common prefix of sortitions.
+ ///
+ /// The code identifies the second range of previously-invalid-but-now-valid sortitions and marks them
+ /// as valid once again. In addition, it updates the Stacks chainstate DB such that any Stacks
+ /// blocks that were orphaned and never processed can be retried with the now-revalidated
+ /// sortition.
+ ///
+ /// The code identifies the third range of now-invalid sortitions and marks them as invalid in
+ /// the sortition DB.
+ ///
+ /// Note that regardless of the affirmation map status, a Stacks block will remain processed
+ /// once it gets accepted. Its underlying sortition may become invalidated, in which case, the
+ /// Stacks block would no longer be considered as part of the canonical Stacks fork (since the
+ /// canonical Stacks chain tip must reside on a valid sortition). However, a Stacks block that
+ /// should be processed at the end of the day may temporarily be considered orphaned if there
+ /// is a "deep" affirmation map reorg that causes at least one reward cycle's sortitions to
+ /// be treated as invalid. This is what necessitates retrying Stacks blocks that have been
+ /// downloaded and considered orphaned because they were never processed -- they may in fact be
+ /// valid and processable once the node has identified the canonical sortition history!
+ ///
+ /// The only kinds of errors returned here are database query errors.
fn handle_affirmation_reorg(&mut self) -> Result<(), Error> { let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; let heaviest_am = BurnchainDB::get_heaviest_anchor_block_affirmation_map( @@ -638,7 +680,7 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> // burn chain height at which we'll re-try orphaned Stacks blocks, and // revalidate the sortitions that were previously invalid but have now been - // made valid + // made valid. let mut first_invalidate_start_block = 0; // set of sortition IDs that are currently invalid, but will need to be reset @@ -694,6 +736,7 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> // careful -- we might have already procesed sortitions in this // reward cycle with this PoX ID, but that were never confirmed + // by a subsequent prepare phase. let start_height = last_invalidate_start_block; let end_height = canonical_burnchain_tip.block_height; for height in start_height..end_height { @@ -922,7 +965,8 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> // meaning that we're in the middle of an affirmation reorg. 
let affirmation = canonical_affirmation_map .at(new_reward_cycle - 1) - .expect("BUG: checked index overflow"); + .expect("BUG: checked index overflow") + .to_owned(); test_debug!("Affirmation '{}' for anchor block of previous reward cycle {} canonical affirmation map {}", &affirmation, new_reward_cycle - 1, &canonical_affirmation_map); // switch reward cycle info assessment based on what the network @@ -1139,8 +1183,6 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> burn_header_hashes.join(", ") ); - let mut replay_blocks = vec![]; - for unprocessed_block in sortitions_to_process.into_iter() { let BurnchainBlockData { header, ops } = unprocessed_block; @@ -1261,7 +1303,6 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> self.replay_stacks_blocks(vec![next_snapshot .winning_stacks_block_hash .clone()])?; - replay_blocks.push(next_snapshot.winning_stacks_block_hash); } } @@ -1271,9 +1312,6 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> self.canonical_sortition_tip = Some(sortition_id.clone()); last_processed_ancestor = sortition_id; - // self.replay_stacks_blocks(replay_blocks)?; - replay_blocks = vec![]; - if let Some(pox_anchor) = self.process_ready_blocks()? { if let Some(expected_anchor_block_hash) = self.process_new_pox_anchor(pox_anchor)? 
{ info!( @@ -1567,8 +1605,8 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> if canonical_am .at(reward_cycle) - .unwrap_or(AffirmationMapEntry::PoxAnchorBlockAbsent) - == AffirmationMapEntry::PoxAnchorBlockPresent + .unwrap_or(&AffirmationMapEntry::PoxAnchorBlockAbsent) + == &AffirmationMapEntry::PoxAnchorBlockPresent { // yup, we're expecting this info!("Discovered an old anchor block: {}", &pox_anchor); From a4025e7a8579b0d39188198c7bb26bfa6250ea87 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 18:07:39 -0400 Subject: [PATCH 044/116] fix: remove dead documentation --- src/chainstate/coordinator/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 1464c7759e..e3fd93a679 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -3043,7 +3043,6 @@ fn test_pox_processable_block_in_different_pox_forks() { [burnchain_blind].iter_mut(), ); - // handle the sortition -- if loop { let missing_anchor_opt = coord.handle_new_burnchain_block().unwrap(); if let Some(missing_anchor) = missing_anchor_opt { From d325d01b4e95106e9e1b9d285ebd35cbb41a5668 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 21:31:24 -0400 Subject: [PATCH 045/116] fix: remove unused code paths (turn them into errors) --- src/burnchains/affirmation.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index cacfff93f4..efd318f0a2 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -335,6 +335,8 @@ pub fn read_prepare_phase_commits<'a, B: BurnchainHeaderReader>( } /// Find all referenced parent block-commits already in the burnchain DB, so we can extract their VRF seeds. +/// If this method errors out, it's because it couldn't read the burnchain headers DB (or it's +/// corrupted). 
Either way, the caller may treat this as a fatal condition. pub fn read_parent_block_commits<'a, B: BurnchainHeaderReader>( burnchain_tx: &BurnchainDBTransaction<'a>, indexer: &B, @@ -347,14 +349,12 @@ pub fn read_parent_block_commits<'a, B: BurnchainHeaderReader>( if let Some(hdr) = indexer.read_burnchain_header(opdata.parent_block_ptr as u64)? { hdr } else { - test_debug!( - "Orphan block commit {},{},{}: no such block {}", - &opdata.txid, - opdata.block_height, - opdata.vtxindex, - opdata.parent_block_ptr + // this is pretty bad if this happens + error!( + "Discontiguous header database: no such block {}, but have block {}", + opdata.parent_block_ptr, opdata.block_height ); - continue; + return Err(Error::MissingParentBlock); }; test_debug!("Get header at {}: {:?}", opdata.parent_block_ptr, &hdr); @@ -710,6 +710,7 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( /// (c) the descendancy data for the prepare phase. Descendency[i][j] is true if the jth /// block-commit in the ith block in the prepare phase descends from the anchor block, or False /// if not. +/// Returns only database-related errors. pub fn find_pox_anchor_block<'a, B: BurnchainHeaderReader>( burnchain_tx: &BurnchainDBTransaction<'a>, reward_cycle: u64, From 2c7bbb720dabc05d49bbb862468cbba6e0b021e4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 21:31:40 -0400 Subject: [PATCH 046/116] fix: remove unused code --- src/burnchains/db.rs | 84 +++----------------------------------------- 1 file changed, 4 insertions(+), 80 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 55efc50111..69a2405df4 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -314,7 +314,7 @@ impl<'a> BurnchainDBTransaction<'a> { } /// Add an affirmation map into the database. Returns the affirmation map ID. 
- fn insert_block_commit_affirmation_map( + pub fn insert_block_commit_affirmation_map( &self, affirmation_map: &AffirmationMap, ) -> Result { @@ -334,7 +334,7 @@ impl<'a> BurnchainDBTransaction<'a> { /// Update a block-commit's affirmation state -- namely, record the reward cycle that this /// block-commit affirms, if any (anchor_block_descendant), and record the affirmation map ID /// for this block-commit (affirmation_id). - fn update_block_commit_affirmation( + pub fn update_block_commit_affirmation( &self, block_commit: &LeaderBlockCommitOp, anchor_block_descendant: Option, @@ -401,35 +401,8 @@ impl<'a> BurnchainDBTransaction<'a> { .map_err(|e| DBError::SqliteError(e)) } - /// Clear the descendancy data and affirmations for all block-commits in a reward cycle - /// (both the reward and prepare phases), as well as anchor block data. - pub fn clear_reward_cycle_descendancies( - &self, - reward_cycle: u64, - burnchain: &Burnchain, - ) -> Result<(), DBError> { - let first_block_height = burnchain.reward_cycle_to_block_height(reward_cycle); - let last_block_height = burnchain.reward_cycle_to_block_height(reward_cycle + 1); - - test_debug!( - "Clear descendancy data for reward cycle {} (blocks {}-{})", - reward_cycle, - first_block_height, - last_block_height - ); - - let sql = "UPDATE block_commit_metadata SET affirmation_id = 0, anchor_block = NULL, anchor_block_descendant = NULL WHERE block_height >= ?1 AND block_height < ?2"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(first_block_height)?, - &u64_to_sql(last_block_height)?, - ]; - self.sql_tx - .execute(sql, args) - .map(|_| ()) - .map_err(|e| DBError::SqliteError(e)) - } - - /// Calculate a burnchain block's block-commits' descendancy information + /// Calculate a burnchain block's block-commits' descendancy information. + /// Only fails on DB errors. 
pub fn update_block_descendancy( &self, indexer: &B, @@ -1159,18 +1132,6 @@ impl BurnchainDB { query_row(conn, sql, args) } - pub fn get_affirmation_map_at( - conn: &DBConn, - burn_header_hash: &BurnchainHeaderHash, - txid: &Txid, - ) -> Result, DBError> { - let am_id_opt = BurnchainDB::get_affirmation_map_id_at(conn, burn_header_hash, txid)?; - match am_id_opt { - Some(am_id) => BurnchainDB::get_affirmation_map(conn, am_id), - None => Ok(None), - } - } - pub fn get_block_commit_affirmation_id( conn: &DBConn, block_commit: &LeaderBlockCommitOp, @@ -1217,20 +1178,6 @@ impl BurnchainDB { Ok(Some((commit, commit_metadata))) } - pub fn get_block_commit_affirmation_map( - conn: &DBConn, - block_commit: &LeaderBlockCommitOp, - ) -> Result, DBError> { - let am_id = match BurnchainDB::get_block_commit_affirmation_id(conn, block_commit)? { - Some(am_id) => am_id, - None => { - return Ok(None); - } - }; - - BurnchainDB::get_affirmation_map(conn, am_id) - } - // do NOT call directly; only use in tests pub fn store_new_burnchain_block_ops_unchecked( &mut self, @@ -1370,29 +1317,6 @@ impl BurnchainDB { ) } - pub fn get_commit_metadata_at( - conn: &DBConn, - indexer: &B, - block_ptr: u32, - vtxindex: u16, - ) -> Result, DBError> { - let header_hash = match indexer.read_burnchain_header(block_ptr as u64)? { - Some(hdr) => hdr.block_hash, - None => { - test_debug!("No headers at height {}", block_ptr); - return Ok(None); - } - }; - - let commit = BurnchainDB::get_commit_in_block_at(conn, &header_hash, block_ptr, vtxindex)? - .expect(&format!( - "BUG: no metadata for stored block-commit {},{},{})", - &header_hash, block_ptr, vtxindex - )); - - BurnchainDB::get_commit_metadata(conn, &header_hash, &commit.txid) - } - /// Get the block-commit and block metadata for the anchor block with the heaviest affirmation /// weight. 
pub fn get_heaviest_anchor_block( From 05dc2a391e4c425d0f5714caeb8e3139a5633e13 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 21:31:59 -0400 Subject: [PATCH 047/116] refactor: api sync --- src/burnchains/tests/affirmation.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/burnchains/tests/affirmation.rs b/src/burnchains/tests/affirmation.rs index ae3ed4f4d6..f62e472646 100644 --- a/src/burnchains/tests/affirmation.rs +++ b/src/burnchains/tests/affirmation.rs @@ -74,6 +74,7 @@ fn affirmation_map_encode_decode() { ])) ); assert_eq!(AffirmationMap::decode("x"), None); + assert_eq!(AffirmationMap::decode("\u{0101}"), None); assert_eq!(AffirmationMap::empty().encode(), "".to_string()); assert_eq!( @@ -412,7 +413,7 @@ fn test_read_prepare_phase_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -478,7 +479,7 @@ fn test_parent_block_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -569,7 +570,7 @@ fn test_filter_orphan_block_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; 
burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -629,7 +630,7 @@ fn test_filter_missed_block_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(5, 3, 3, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -689,7 +690,7 @@ fn test_find_heaviest_block_commit() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -901,7 +902,7 @@ fn test_find_heaviest_parent_commit_many_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1153,7 +1154,7 @@ fn test_update_pox_affirmation_maps_3_forks() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1344,7 +1345,7 @@ fn test_update_pox_affirmation_maps_unique_anchor_block() { let first_height = 0; let mut burnchain = 
Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1500,7 +1501,7 @@ fn test_update_pox_affirmation_maps_absent() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1858,7 +1859,7 @@ fn test_update_pox_affirmation_maps_nothing() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(10, 5, 3, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -2062,7 +2063,7 @@ fn test_update_pox_affirmation_fork_2_cycles() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100); + burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -2278,7 +2279,7 @@ fn test_update_pox_affirmation_fork_duel() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100); + burnchain.pox_constants = PoxConstants::new(5, 2, 2, 25, 5, 99, 100, u32::max_value()); 
burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; From cf20f1a4d4cd94f44b9b87eaaf1b1e731cd4ce7d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 19 Aug 2021 21:32:13 -0400 Subject: [PATCH 048/116] fix: add test coverage for getting, setting, and querying anchor block status and for testing code paths for updating the affirmation status for both genesis-building commits and invalid commits --- src/burnchains/tests/db.rs | 205 ++++++++++++++++++++++++++++++++++++- 1 file changed, 204 insertions(+), 1 deletion(-) diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs index 9ef9cec06a..c73c68c7b8 100644 --- a/src/burnchains/tests/db.rs +++ b/src/burnchains/tests/db.rs @@ -18,6 +18,7 @@ use std::cmp; use std::convert::TryInto; use address::*; +use burnchains::affirmation::AffirmationMap; use burnchains::bitcoin::address::*; use burnchains::bitcoin::blocks::*; use burnchains::bitcoin::*; @@ -501,7 +502,7 @@ fn test_get_commit_at() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100, u32::max_value()); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -589,3 +590,205 @@ fn test_get_commit_at() { let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), &headers, 4, 0).unwrap(); assert!(cmt.is_none()); } + +#[test] +fn test_get_set_check_anchor_block() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 1; + + let mut burnchain = Burnchain::regtest(":memory:"); + burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100, u32::max_value()); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + 
burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let mut parent = None; + let mut parent_block_header: Option = None; + let mut cmts = vec![]; + + for i in 0..5 { + let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); + let block_header = BurnchainBlockHeader { + block_height: (first_height + i) as u64, + block_hash: hdr, + parent_block_hash: parent_block_header + .as_ref() + .map(|blk| blk.block_hash.clone()) + .unwrap_or(first_block_header.block_hash.clone()), + num_txs: 1, + timestamp: i as u64, + }; + + headers.push(block_header.clone()); + parent_block_header = Some(block_header); + } + + for i in 0..5 { + let block_header = &headers[i + 1]; + + let cmt = make_simple_block_commit( + &burnchain, + parent.as_ref(), + block_header, + BlockHeaderHash([((i + 1) as u8) | 0x80; 32]), + ); + burnchain_db + .store_new_burnchain_block_ops_unchecked( + &burnchain, + &headers, + block_header, + &vec![BlockstackOperationType::LeaderBlockCommit(cmt.clone())], + ) + .unwrap(); + + cmts.push(cmt.clone()); + parent = Some(cmt); + } + + assert!(!BurnchainDB::has_anchor_block(burnchain_db.conn(), 1).unwrap()); + + { + let tx = burnchain_db.tx_begin().unwrap(); + tx.set_anchor_block(&cmts[3], 1).unwrap(); + tx.commit().unwrap(); + } + + assert!(BurnchainDB::has_anchor_block(burnchain_db.conn(), 1).unwrap()); + assert_eq!( + BurnchainDB::get_anchor_block_commit(burnchain_db.conn(), 1) + .unwrap() + .unwrap() + .0, + cmts[3] + ); + assert!(BurnchainDB::is_anchor_block( + burnchain_db.conn(), + &cmts[3].burn_header_hash, + &cmts[3].txid + ) + .unwrap()); +} + +#[test] +fn test_update_block_descendancy() { + let first_bhh = BurnchainHeaderHash([0; 32]); + let first_timestamp = 0; + let first_height = 1; + + let mut burnchain = Burnchain::regtest(":memory:"); + 
burnchain.pox_constants = PoxConstants::new(5, 3, 2, 3, 0, 99, 100, u32::max_value()); + burnchain.first_block_height = first_height; + burnchain.first_block_hash = first_bhh.clone(); + burnchain.first_block_timestamp = first_timestamp; + + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + + let mut headers = vec![first_block_header.clone()]; + let mut parent = None; + let mut parent_block_header: Option = None; + let mut cmts = vec![]; + let mut cmts_genesis = vec![]; + let mut cmts_invalid = vec![]; + + for i in 0..5 { + let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); + let block_header = BurnchainBlockHeader { + block_height: (first_height + i) as u64, + block_hash: hdr, + parent_block_hash: parent_block_header + .as_ref() + .map(|blk| blk.block_hash.clone()) + .unwrap_or(first_block_header.block_hash.clone()), + num_txs: 3, + timestamp: i as u64, + }; + + headers.push(block_header.clone()); + parent_block_header = Some(block_header); + } + + let mut am_id = 0; + + for i in 0..5 { + let block_header = &headers[i + 1]; + + let cmt = make_simple_block_commit( + &burnchain, + parent.as_ref(), + block_header, + BlockHeaderHash([((i + 1) as u8) | 0x80; 32]), + ); + + // make a second commit that builds off of genesis + let mut cmt_genesis = cmt.clone(); + cmt_genesis.parent_block_ptr = 0; + cmt_genesis.parent_vtxindex = 0; + cmt_genesis.block_header_hash = BlockHeaderHash([((i + 1) as u8) | 0xa0; 32]); + cmt_genesis.txid = next_txid(); + + // make an invalid commit + let mut cmt_invalid = cmt.clone(); + cmt_invalid.parent_vtxindex += 1; + cmt_invalid.block_header_hash = BlockHeaderHash([((i + 1) as u8) | 0xc0; 32]); + cmt_invalid.txid = next_txid(); + + burnchain_db + .store_new_burnchain_block_ops_unchecked( + &burnchain, + &headers, + block_header, + &vec![ + BlockstackOperationType::LeaderBlockCommit(cmt.clone()), + 
BlockstackOperationType::LeaderBlockCommit(cmt_genesis.clone()), + BlockstackOperationType::LeaderBlockCommit(cmt_invalid.clone()), + ], + ) + .unwrap(); + + cmts.push(cmt.clone()); + cmts_genesis.push(cmt_genesis.clone()); + cmts_invalid.push(cmt_invalid.clone()); + + parent = Some(cmt); + + if i == 0 { + am_id = { + let tx = burnchain_db.tx_begin().unwrap(); + tx.set_anchor_block(&cmts[0], 1).unwrap(); + let am_id = tx + .insert_block_commit_affirmation_map(&AffirmationMap::decode("p").unwrap()) + .unwrap(); + tx.update_block_commit_affirmation(&cmts[0], Some(1), am_id) + .unwrap(); + tx.commit().unwrap(); + am_id + }; + assert_ne!(am_id, 0); + } + } + + // each valid commit should have cmts[0]'s affirmation map + for i in 1..5 { + let cmt_am_id = + BurnchainDB::get_block_commit_affirmation_id(burnchain_db.conn(), &cmts[i]).unwrap(); + assert_eq!(cmt_am_id.unwrap(), am_id); + + let genesis_am_id = + BurnchainDB::get_block_commit_affirmation_id(burnchain_db.conn(), &cmts_genesis[i]) + .unwrap(); + assert_eq!(genesis_am_id.unwrap(), 0); + + let invalid_am_id = + BurnchainDB::get_block_commit_affirmation_id(burnchain_db.conn(), &cmts_invalid[i]) + .unwrap(); + assert_eq!(invalid_am_id.unwrap(), 0); + } +} From 066870cafd6d5f7c4fc856766dc8f4e758ad6e02 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 01:20:30 -0400 Subject: [PATCH 049/116] feat: expose the means by which we create mocked SPV headers, so we can do so in tests --- src/burnchains/bitcoin/indexer.rs | 62 ++++++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 9 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index d97fc7a35a..a27227522a 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -640,20 +640,36 @@ impl BitcoinIndexer { true, false, )?; + spv_client.disable_check_txcount(); + let hdr = LoneBlockHeader { - header: BlockHeader { - bits: 0, - merkle_root: Sha256dHash([0u8; 32]), - nonce: 
0, - prev_blockhash: header.parent_block_hash.to_bitcoin_hash(), - time: header.timestamp as u32, - version: 0x20000000, - }, + header: BitcoinIndexer::mock_bitcoin_header( + &header.parent_block_hash, + header.timestamp as u32, + ), tx_count: VarInt(header.num_txs), }; - spv_client.write_block_headers(header.block_height, vec![hdr])?; + + assert!(header.block_height > 0); + let start_height = header.block_height - 1; + spv_client.insert_block_headers_after(start_height, vec![hdr])?; Ok(()) } + + #[cfg(test)] + pub fn mock_bitcoin_header( + parent_block_hash: &BurnchainHeaderHash, + timestamp: u32, + ) -> BlockHeader { + BlockHeader { + bits: 0, + merkle_root: Sha256dHash([0u8; 32]), + nonce: 0, + prev_blockhash: parent_block_hash.to_bitcoin_hash(), + time: timestamp, + version: 0x20000000, + } + } } impl Drop for BitcoinIndexer { @@ -1297,4 +1313,32 @@ mod test { let last_block = indexer.sync_headers(0, None).unwrap(); eprintln!("sync'ed to block {}", last_block); } + + #[test] + fn test_load_store_mock_bitcoin_header() { + let parent_block_header_hash = BurnchainHeaderHash::from_hex( + "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206", + ) + .unwrap(); + + let hdr = BurnchainBlockHeader { + block_height: 123, + block_hash: BurnchainHeaderHash::from_bitcoin_hash( + &BitcoinIndexer::mock_bitcoin_header(&parent_block_header_hash, 456).bitcoin_hash(), + ), + parent_block_hash: parent_block_header_hash.clone(), + num_txs: 1, + timestamp: 456, + }; + + let mut indexer = BitcoinIndexer::new_unit_test("/tmp/test_load_store_mock_bitcoin_header"); + + indexer.raw_store_header(hdr.clone()).unwrap(); + + let mut hdr_from_db = indexer.read_burnchain_header(123).unwrap().unwrap(); + + // only txcount will be different + hdr_from_db.num_txs = 1; + assert_eq!(hdr_from_db, hdr); + } } From 79241f4cb20d7e31b27f0cd38ad9fdb46464795b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 01:21:09 -0400 Subject: [PATCH 050/116] feat: optionally disable 
strict SPV header validation in testing -- namely, to permit the txcount to be nonzero --- src/burnchains/bitcoin/spv.rs | 59 +++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/src/burnchains/bitcoin/spv.rs b/src/burnchains/bitcoin/spv.rs index 4809e120bd..cf2fce28ca 100644 --- a/src/burnchains/bitcoin/spv.rs +++ b/src/burnchains/bitcoin/spv.rs @@ -36,6 +36,8 @@ use burnchains::bitcoin::BitcoinNetworkType; use burnchains::bitcoin::Error as btc_error; use burnchains::bitcoin::PeerMessage; +use types::chainstate::BurnchainHeaderHash; + use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use rusqlite::Row; use rusqlite::Transaction; @@ -51,15 +53,15 @@ use util::log; const BLOCK_HEADER_SIZE: u64 = 81; -const BITCOIN_GENESIS_BLOCK_HASH_MAINNET: &'static str = +pub const BITCOIN_GENESIS_BLOCK_HASH_MAINNET: &'static str = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"; -const BITCOIN_GENESIS_BLOCK_MERKLE_ROOT_MAINNET: &'static str = +pub const BITCOIN_GENESIS_BLOCK_MERKLE_ROOT_MAINNET: &'static str = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"; -const BITCOIN_GENESIS_BLOCK_HASH_TESTNET: &'static str = +pub const BITCOIN_GENESIS_BLOCK_HASH_TESTNET: &'static str = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"; -const BITCOIN_GENESIS_BLOCK_HASH_REGTEST: &'static str = +pub const BITCOIN_GENESIS_BLOCK_HASH_REGTEST: &'static str = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"; pub const BLOCK_DIFFICULTY_CHUNK_SIZE: u64 = 2016; @@ -91,6 +93,7 @@ pub struct SpvClient { readwrite: bool, reverse_order: bool, headers_db: DBConn, + check_txcount: bool, } impl FromSql for Sha256dHash { @@ -143,6 +146,7 @@ impl SpvClient { readwrite: bool, reverse_order: bool, ) -> Result { + let exists = fs::metadata(headers_path).is_ok(); let conn = SpvClient::db_open(headers_path, readwrite)?; let mut client = SpvClient { 
headers_path: headers_path.to_owned(), @@ -153,15 +157,21 @@ impl SpvClient { readwrite: readwrite, reverse_order: reverse_order, headers_db: conn, + check_txcount: true, }; - if readwrite { + if readwrite && !exists { client.init_block_headers()?; } Ok(client) } + #[cfg(test)] + pub fn disable_check_txcount(&mut self) { + self.check_txcount = false; + } + pub fn conn(&self) -> &DBConn { &self.headers_db } @@ -251,15 +261,21 @@ impl SpvClient { fn validate_header_integrity( start_height: u64, headers: &Vec, + check_txcount: bool, ) -> Result<(), btc_error> { if headers.len() == 0 { return Ok(()); } - for i in 0..headers.len() { - if headers[i].tx_count != VarInt(0) { - warn!("Non-zero tx count on header offset {}", i); - return Err(btc_error::InvalidReply); + if check_txcount { + for i in 0..headers.len() { + if headers[i].tx_count != VarInt(0) { + warn!( + "Non-zero tx count on header offset {}: {:?}", + i, &headers[i].tx_count + ); + return Err(btc_error::InvalidReply); + } } } @@ -440,6 +456,13 @@ impl SpvClient { &header.nonce, &u64_to_sql(height)?, ]; + + test_debug!( + "SPV: insert header {} {}: {:?}", + height, + &header.bitcoin_hash(), + &header + ); tx.execute(sql, args) .map_err(|e| btc_error::DBError(db_error::SqliteError(e))) .and_then(|_x| Ok(())) @@ -603,10 +626,11 @@ impl SpvClient { start_height ); - SpvClient::validate_header_integrity(start_height, &block_headers).map_err(|e| { - error!("Received invalid headers: {:?}", &e); - e - })?; + SpvClient::validate_header_integrity(start_height, &block_headers, self.check_txcount) + .map_err(|e| { + error!("Received invalid headers: {:?}", &e); + e + })?; let parent_header = match self.read_block_header(start_height)? 
{ Some(header) => header, @@ -656,10 +680,11 @@ impl SpvClient { end_height ); - SpvClient::validate_header_integrity(start_height, &block_headers).map_err(|e| { - error!("Received invalid headers: {:?}", &e); - e - })?; + SpvClient::validate_header_integrity(start_height, &block_headers, self.check_txcount) + .map_err(|e| { + error!("Received invalid headers: {:?}", &e); + e + })?; match self.read_block_header(end_height)? { Some(child_header) => { From d7972da833257c073403dec3c2ac3a7f297e328a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 01:21:38 -0400 Subject: [PATCH 051/116] refactor: expose the means by which we process each reward cycle's affirmation maps at reward cycle boundaries --- src/burnchains/burnchain.rs | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index 92105d62df..60c608a817 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -851,8 +851,26 @@ impl Burnchain { ); burnchain_db.store_new_burnchain_block(burnchain, indexer, &block)?; - let block_height = block.block_height(); + Burnchain::process_affirmation_maps( + burnchain, + burnchain_db, + indexer, + block.block_height(), + )?; + + let header = block.header(); + Ok(header) + } + /// Update the affirmation maps for the previous reward cycle's commits. + /// This is a no-op unless the given burnchain block height falls on a reward cycle boundary. In that + /// case, the previous reward cycle's block commits' affirmation maps are all re-calculated. 
+ pub fn process_affirmation_maps( + burnchain: &Burnchain, + burnchain_db: &mut BurnchainDB, + indexer: &B, + block_height: u64, + ) -> Result<(), burnchain_error> { let this_reward_cycle = burnchain .block_height_to_reward_cycle(block_height) .unwrap_or(0); @@ -872,10 +890,7 @@ impl Burnchain { ); update_pox_affirmation_maps(burnchain_db, indexer, prev_reward_cycle, burnchain)?; } - - let header = block.header(); - - Ok(header) + Ok(()) } /// Hand off the block to the ChainsCoordinator _and_ process the sortition From 0f0f5af73d8e1bdfb3e5504af4815a52b9a05b27 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 01:22:07 -0400 Subject: [PATCH 052/116] refactor: more debug output --- src/burnchains/db.rs | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 69a2405df4..f44ccb7f36 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -483,10 +483,13 @@ impl<'a> BurnchainDBTransaction<'a> { } else { // this is an invalid commit -- no parent found test_debug!( - "No block-commit parent found for {},{},{}", + "No block-commit parent {},{} found for {},{},{} (in {})", + commit.parent_block_ptr, + commit.parent_vtxindex, &commit.txid, commit.block_height, - commit.vtxindex + commit.vtxindex, + &commit.burn_header_hash ); } self.update_block_commit_affirmation(commit, None, 0)?; @@ -835,6 +838,13 @@ impl<'a> BurnchainDBTransaction<'a> { stmt.execute(args)?; } + test_debug!( + "Add {} block ops to {} height {} (parent {})", + block_ops.len(), + &block_header.block_hash, + &block_header.block_height, + &block_header.parent_block_hash + ); for op in block_ops.iter() { if let BlockstackOperationType::LeaderBlockCommit(ref opdata) = op { let bcm = BlockCommitMetadata { @@ -1229,6 +1239,14 @@ impl BurnchainDB { let db_tx = self.tx_begin()?; + test_debug!( + "Store raw block {},{} (parent {}) with {} ops", + &header.block_hash, + header.block_height, + 
&header.parent_block_hash, + blockstack_ops.len() + ); + db_tx.store_burnchain_db_entry(&header)?; db_tx.store_blockstack_ops(burnchain, indexer, &header, &blockstack_ops)?; @@ -1295,6 +1313,8 @@ impl BurnchainDB { } }; + test_debug!("Header at {}: {}", block_ptr, &header_hash); + BurnchainDB::get_commit_in_block_at(conn, &header_hash, block_ptr, vtxindex) } From 47a8e82541b428e4ee65b7dc83225bb1eda3602a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 01:22:39 -0400 Subject: [PATCH 053/116] fix: when creating mocked sortitions, make it so the BlockSnapshot's burn header hashes match the SPV header database. This is required for affirmation map calculations to work in the chains coordinator. --- src/burnchains/tests/mod.rs | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/src/burnchains/tests/mod.rs b/src/burnchains/tests/mod.rs index f1893cb780..16346ff033 100644 --- a/src/burnchains/tests/mod.rs +++ b/src/burnchains/tests/mod.rs @@ -40,6 +40,9 @@ use util::secp256k1::*; use util::vrf::*; use crate::types::chainstate::{BlockHeaderHash, SortitionId, VRFSeed}; +use deps::bitcoin::network::serialize::BitcoinHash; + +use burnchains::bitcoin::spv::BITCOIN_GENESIS_BLOCK_HASH_REGTEST; impl Txid { pub fn from_test_data( @@ -572,30 +575,40 @@ impl TestBurnchainBlock { burnchain: &Burnchain, coord: &mut ChainsCoordinator<'a, T, N, R>, ) -> BlockSnapshot { - let block_hash = BurnchainHeaderHash::from_test_data( - self.block_height, - &self.parent_snapshot.index_root, - self.fork_id, + let mut indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); + let parent_hdr = indexer + .read_burnchain_header(self.block_height.saturating_sub(1)) + .unwrap() + .unwrap(); + let now = get_epoch_time_secs(); + let block_hash = BurnchainHeaderHash::from_bitcoin_hash( + &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32).bitcoin_hash(), ); let mock_bitcoin_block = BitcoinBlock::new( 
self.block_height, &block_hash, &self.parent_snapshot.burn_header_hash, &vec![], - get_epoch_time_secs(), + now, ); let block = BurnchainBlock::Bitcoin(mock_bitcoin_block); + let header = BurnchainBlockHeader { + block_height: block.block_height(), + block_hash: block_hash.clone(), + parent_block_hash: parent_hdr.block_hash.clone(), + num_txs: block.header().num_txs, + timestamp: block.header().timestamp, + }; test_debug!( - "Process PoX block {} {}", + "Process PoX block {} {}: {:?}", block.block_height(), - &block.block_hash() + &block.block_hash(), + &header ); - let header = block.header(); - let indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); - let mut burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); + indexer.raw_store_header(header.clone()).unwrap(); burnchain_db .raw_store_burnchain_block(burnchain, &indexer, header.clone(), self.txs.clone()) .unwrap(); @@ -603,6 +616,9 @@ impl TestBurnchainBlock { coord.handle_new_burnchain_block().unwrap(); let snapshot = SortitionDB::get_canonical_burn_chain_tip(db.conn()).unwrap(); + + assert_eq!(snapshot.burn_header_hash, header.block_hash); + assert_eq!(snapshot.burn_header_hash, block.block_hash()); snapshot } } From 18f082bca26ec5851539b9ed616f5f7cf41dfe23 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 03:18:07 -0400 Subject: [PATCH 054/116] fix: use the burnchain hash as the stubbed sortition ID when instantiating a burnchain test --- src/burnchains/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/burnchains/tests/mod.rs b/src/burnchains/tests/mod.rs index 16346ff033..88b6087346 100644 --- a/src/burnchains/tests/mod.rs +++ b/src/burnchains/tests/mod.rs @@ -634,7 +634,7 @@ impl TestBurnchainFork { start_height, mined: 0, tip_header_hash: start_header_hash.clone(), - tip_sortition_id: SortitionId([0x00; 32]), + tip_sortition_id: SortitionId::stubbed(&start_header_hash), tip_index_root: start_index_root.clone(), 
blocks: vec![], pending_blocks: vec![], From 223fd4346a7e69e032b21f1502a9fd39cc8b229e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 03:19:01 -0400 Subject: [PATCH 055/116] refactor: debug the initial block the sortdb starts at --- src/chainstate/burn/db/sortdb.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index fc9e3cb354..6903f4e82a 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -2192,7 +2192,10 @@ impl SortitionDB { first_burn_header_timestamp: u64, epochs_ref: &[StacksEpoch], ) -> Result<(), db_error> { - debug!("Instantiate SortDB"); + debug!( + "Instantiate SortDB: first block is {},{}", + first_block_height, first_burn_header_hash + ); sql_pragma(self.conn(), "PRAGMA journal_mode = WAL;")?; @@ -3011,7 +3014,7 @@ impl SortitionDB { }) .map(|x| { if x.is_none() { - test_debug!("No snapshot with burn hash {}", sortition_id); + test_debug!("No snapshot with sortition ID {}", sortition_id); } x }) From 78c64cded7004e7aad160ef172df737f8901bb59 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 03:20:00 -0400 Subject: [PATCH 056/116] fix: start all pox-based tests on the first bitcoin regtest block hash --- src/chainstate/stacks/boot/mod.rs | 62 +++++++++++++++++++++++++------ 1 file changed, 50 insertions(+), 12 deletions(-) diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index cc5c1dbcfd..7a8a68bfe5 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -50,6 +50,8 @@ use crate::types::chainstate::StacksBlockId; use crate::util::boot; use crate::vm::{costs::LimitedCostTracker, SymbolicExpression}; +use core::BITCOIN_REGTEST_FIRST_BLOCK_HASH; + const BOOT_CODE_POX_BODY: &'static str = std::include_str!("pox.clar"); const BOOT_CODE_POX_TESTNET_CONSTS: &'static str = std::include_str!("pox-testnet.clar"); const 
BOOT_CODE_POX_MAINNET_CONSTS: &'static str = std::include_str!("pox-mainnet.clar"); @@ -1139,7 +1141,10 @@ pub mod test { #[test] fn test_liquid_ustx() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -1324,7 +1329,10 @@ pub mod test { #[test] fn test_hook_special_contract_call() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 3; burnchain.pox_constants.prepare_length = 1; burnchain.pox_constants.anchor_threshold = 1; @@ -1437,7 +1445,10 @@ pub mod test { #[test] fn test_liquid_ustx_burns() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -1542,7 +1553,10 @@ pub mod test { #[test] fn test_pox_lockup_single_tx_sender() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -1751,7 +1765,10 @@ pub mod test { #[test] fn test_pox_lockup_single_tx_sender_100() { - let mut burnchain = Burnchain::default_unittest(0, 
&BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 4; // 4 reward slots burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -2011,7 +2028,10 @@ pub mod test { #[test] fn test_pox_lockup_contract() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -2275,7 +2295,10 @@ pub mod test { #[test] fn test_pox_lockup_multi_tx_sender() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -2485,7 +2508,10 @@ pub mod test { #[test] fn test_pox_lockup_no_double_stacking() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -2698,7 +2724,10 @@ pub mod test { #[test] fn test_pox_lockup_single_tx_sender_unlock() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); 
burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -2936,7 +2965,10 @@ pub mod test { #[test] fn test_pox_lockup_unlock_relock() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -3455,7 +3487,10 @@ pub mod test { #[test] fn test_pox_lockup_unlock_on_spend() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -3902,7 +3937,10 @@ pub mod test { #[test] fn test_pox_lockup_reject() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; From 7dacc54a5729e02b425b6f258174f373991fa974 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 03:20:34 -0400 Subject: [PATCH 057/116] fix: start all pox tests on the first bitcoin regtest block --- src/chainstate/stacks/boot/pox_2_tests.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 58089cf71b..a92e947ba4 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ 
b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -51,6 +51,8 @@ use clarity_vm::clarity::Error as ClarityError; use super::test::*; +use core::*; + const USTX_PER_HOLDER: u128 = 1_000_000; /// Return the BlockSnapshot for the latest sortition in the provided @@ -78,7 +80,10 @@ fn test_simple_pox_lockup_transition_pox_2() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; From 3ae302c862f4a0cc4b8bc459732d5970d8daa6bd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 03:20:52 -0400 Subject: [PATCH 058/116] fix: first regtest bitcoin block hash was wrong --- src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 5809ee5238..df3e9233b1 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -94,7 +94,7 @@ pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_HASH: &str = - "0000000000000000000000000000000000000000000000000000000000000000"; + "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"; pub const FIRST_STACKS_BLOCK_HASH: BlockHeaderHash = BlockHeaderHash([0u8; 32]); pub const EMPTY_MICROBLOCK_PARENT_HASH: BlockHeaderHash = BlockHeaderHash([0u8; 32]); From 24a021684132ecfeea08b955d423892071fe3535 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 03:21:06 -0400 Subject: [PATCH 059/116] fix: when generating block snapshots in network tests, use the SPV headers to set the next burnchain block hash so the sortition DB and SPV DB stay in sync. 
This is now necessary since we're testing with affirmation maps. --- src/net/mod.rs | 67 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 20 deletions(-) diff --git a/src/net/mod.rs b/src/net/mod.rs index e200acb179..10627131bd 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -1838,6 +1838,7 @@ pub mod test { use burnchains::bitcoin::*; use burnchains::burnchain::*; use burnchains::db::BurnchainDB; + use burnchains::db::BurnchainHeaderReader; use burnchains::tests::*; use burnchains::*; use chainstate::burn::db::sortdb; @@ -1885,8 +1886,12 @@ pub mod test { codec::StacksMessageCodec, }; + use deps::bitcoin::network::serialize::BitcoinHash; + use super::*; + use burnchains::bitcoin::spv::BITCOIN_GENESIS_BLOCK_HASH_REGTEST; + impl StacksMessageCodec for BlockstackOperationType { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { match self { @@ -2180,10 +2185,7 @@ pub mod test { let start_block = 0; let mut burnchain = Burnchain::default_unittest( start_block, - &BurnchainHeaderHash::from_hex( - "0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), + &BurnchainHeaderHash::from_hex(BITCOIN_GENESIS_BLOCK_HASH_REGTEST).unwrap(), ); burnchain.pox_constants = PoxConstants::new( 5, @@ -2759,23 +2761,34 @@ pub mod test { TestPeer::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash); } - // quick'n'dirty hash of all operations and block height - let mut op_buf = vec![]; - for op in blockstack_ops.iter() { - op.consensus_serialize(&mut op_buf).unwrap(); - } - op_buf.append(&mut (tip.block_height + 1).to_be_bytes().to_vec()); - let h = Sha512Trunc256Sum::from_data(&op_buf); - let mut hash_buf = [0u8; 32]; - hash_buf.copy_from_slice(&h.0); - - let block_header_hash = BurnchainHeaderHash(hash_buf); - let block_header = BurnchainBlockHeader::from_parent_snapshot( - &tip, - block_header_hash.clone(), - blockstack_ops.len() as u64, + let mut indexer = 
BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let parent_hdr = indexer + .read_burnchain_header(tip.block_height) + .unwrap() + .unwrap(); + + test_debug!("parent hdr ({}): {:?}", &tip.block_height, &parent_hdr); + assert_eq!(parent_hdr.block_hash, tip.burn_header_hash); + + let now = get_epoch_time_secs(); + let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( + &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32) + .bitcoin_hash(), + ); + test_debug!( + "Block header hash at {} is {}", + tip.block_height + 1, + &block_header_hash ); + let block_header = BurnchainBlockHeader { + block_height: tip.block_height + 1, + block_hash: block_header_hash.clone(), + parent_block_hash: parent_hdr.block_hash.clone(), + num_txs: blockstack_ops.len() as u64, + timestamp: now, + }; + if set_burn_hash { TestPeer::set_ops_burn_header_hash(&mut blockstack_ops, &block_header_hash); } @@ -2783,7 +2796,13 @@ pub mod test { let mut burnchain_db = BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), true).unwrap(); - let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + test_debug!( + "Store header and block ops for {}-{} ({})", + &block_header.block_hash, + &block_header.parent_block_hash, + block_header.block_height + ); + indexer.raw_store_header(block_header.clone()).unwrap(); burnchain_db .raw_store_burnchain_block( &self.config.burnchain, @@ -2793,6 +2812,14 @@ pub mod test { ) .unwrap(); + Burnchain::process_affirmation_maps( + &self.config.burnchain, + &mut burnchain_db, + &indexer, + block_header.block_height, + ) + .unwrap(); + (block_header.block_height, block_header_hash) }; From d6ebc65d515a4bc341a852b8a114c97bd4192cea Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 13:49:26 -0400 Subject: [PATCH 060/116] fix: better debug log in expect() if there's a missing parent commit --- src/burnchains/db.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) 
diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index f44ccb7f36..51c3bf5d1f 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -552,7 +552,10 @@ impl<'a> BurnchainDBTransaction<'a> { let parent_metadata = BurnchainDB::get_commit_metadata(&self.sql_tx, &parent.burn_header_hash, &parent.txid)? - .expect("BUG: no metadata found for parent block-commit"); + .expect(&format!( + "BUG: no metadata found for parent block-commit {},{},{} in {}", + parent.block_height, parent.vtxindex, &parent.txid, &parent.burn_header_hash + )); let (am, affirmed_reward_cycle) = if let Some(anchor_block) = anchor_block { let anchor_am_id = @@ -696,7 +699,10 @@ impl<'a> BurnchainDBTransaction<'a> { let parent_metadata = BurnchainDB::get_commit_metadata(&self.sql_tx, &parent.burn_header_hash, &parent.txid)? - .expect("BUG: no metadata found for existing block commit"); + .expect(&format!( + "BUG: no metadata found for existing block commit {},{},{} in {}", + parent.block_height, parent.vtxindex, &parent.txid, &parent.burn_header_hash + )); test_debug!( "Reward-phase commit {},{},{} has parent {},{}, anchor block {:?}", From b08304651380c77741d21fbc333266c15dbe0f70 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 13:49:43 -0400 Subject: [PATCH 061/116] fix: make it so all SPV headers -- and by extension, all sortitions -- have the same timestamp, so that we can deterministically re-generate block and sortition histories for multiple test peers regardless of what time it is. 
--- src/burnchains/tests/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/burnchains/tests/mod.rs b/src/burnchains/tests/mod.rs index 88b6087346..ae410d09cd 100644 --- a/src/burnchains/tests/mod.rs +++ b/src/burnchains/tests/mod.rs @@ -44,6 +44,10 @@ use deps::bitcoin::network::serialize::BitcoinHash; use burnchains::bitcoin::spv::BITCOIN_GENESIS_BLOCK_HASH_REGTEST; +// all SPV headers will have this timestamp, so that multiple burnchain nodes will always have the +// same SPV header timestamps regardless of when they are instantiated. +pub const BURNCHAIN_TEST_BLOCK_TIME: u64 = 1629739098; + impl Txid { pub fn from_test_data( block_height: u64, @@ -580,7 +584,7 @@ impl TestBurnchainBlock { .read_burnchain_header(self.block_height.saturating_sub(1)) .unwrap() .unwrap(); - let now = get_epoch_time_secs(); + let now = BURNCHAIN_TEST_BLOCK_TIME; let block_hash = BurnchainHeaderHash::from_bitcoin_hash( &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32).bitcoin_hash(), ); From c26c90ff79241efac92d255baa9776f1ececefba Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Aug 2021 13:50:21 -0400 Subject: [PATCH 062/116] fix: calculate ibd status for the p2p state-machine when processing blocks, since this affects inv sync behavior now. 
--- src/net/mod.rs | 58 +++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 55 insertions(+), 3 deletions(-) diff --git a/src/net/mod.rs b/src/net/mod.rs index 10627131bd..5a0aaa970b 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -2614,18 +2614,55 @@ pub mod test { &self.network.local_peer } + // TODO: DRY up from PoxSyncWatchdog + pub fn infer_initial_burnchain_block_download( + burnchain: &Burnchain, + last_processed_height: u64, + burnchain_height: u64, + ) -> bool { + let ibd = + last_processed_height + (burnchain.stable_confirmations as u64) < burnchain_height; + if ibd { + debug!( + "PoX watchdog: {} + {} < {}, so initial block download", + last_processed_height, burnchain.stable_confirmations, burnchain_height + ); + } else { + debug!( + "PoX watchdog: {} + {} >= {}, so steady-state", + last_processed_height, burnchain.stable_confirmations, burnchain_height + ); + } + ibd + } + pub fn step(&mut self) -> Result { let mut sortdb = self.sortdb.take().unwrap(); let mut stacks_node = self.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); + let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let stacks_tip_height = stacks_node + .chainstate + .get_stacks_chain_tip(&sortdb) + .unwrap() + .map(|blkdat| blkdat.height) + .unwrap_or(0); + let ibd = TestPeer::infer_initial_burnchain_block_download( + &self.config.burnchain, + stacks_tip_height, + burn_tip_height, + ); + let ret = self.network.run( &mut sortdb, &mut stacks_node.chainstate, &mut mempool, None, false, - false, + ibd, 10, &RPCHandlerArgs::default(), &mut HashSet::new(), @@ -2643,13 +2680,28 @@ pub mod test { let mut stacks_node = self.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); + let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let stacks_tip_height = stacks_node + .chainstate + .get_stacks_chain_tip(&sortdb) + 
.unwrap() + .map(|blkdat| blkdat.height) + .unwrap_or(0); + let ibd = TestPeer::infer_initial_burnchain_block_download( + &self.config.burnchain, + stacks_tip_height, + burn_tip_height, + ); + let ret = self.network.run( &mut sortdb, &mut stacks_node.chainstate, &mut mempool, Some(dns_client), false, - false, + ibd, 10, &RPCHandlerArgs::default(), &mut HashSet::new(), @@ -2770,7 +2822,7 @@ pub mod test { test_debug!("parent hdr ({}): {:?}", &tip.block_height, &parent_hdr); assert_eq!(parent_hdr.block_hash, tip.burn_header_hash); - let now = get_epoch_time_secs(); + let now = BURNCHAIN_TEST_BLOCK_TIME; let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32) .bitcoin_hash(), From 1716cdf9a3aa4d2bb3bff5589b59089b173abe43 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Aug 2021 10:32:16 -0400 Subject: [PATCH 063/116] fix: remove now-unneeded test --- src/burnchains/bitcoin/indexer.rs | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index a27227522a..95d110fffa 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -1313,32 +1313,4 @@ mod test { let last_block = indexer.sync_headers(0, None).unwrap(); eprintln!("sync'ed to block {}", last_block); } - - #[test] - fn test_load_store_mock_bitcoin_header() { - let parent_block_header_hash = BurnchainHeaderHash::from_hex( - "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206", - ) - .unwrap(); - - let hdr = BurnchainBlockHeader { - block_height: 123, - block_hash: BurnchainHeaderHash::from_bitcoin_hash( - &BitcoinIndexer::mock_bitcoin_header(&parent_block_header_hash, 456).bitcoin_hash(), - ), - parent_block_hash: parent_block_header_hash.clone(), - num_txs: 1, - timestamp: 456, - }; - - let mut indexer = BitcoinIndexer::new_unit_test("/tmp/test_load_store_mock_bitcoin_header"); - - 
indexer.raw_store_header(hdr.clone()).unwrap(); - - let mut hdr_from_db = indexer.read_burnchain_header(123).unwrap().unwrap(); - - // only txcount will be different - hdr_from_db.num_txs = 1; - assert_eq!(hdr_from_db, hdr); - } } From f94d0a81de15e002935d8a4950026f230a6e652c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Aug 2021 10:33:03 -0400 Subject: [PATCH 064/116] fix: use expect() instead of unwrap() for panicking test failure --- src/burnchains/tests/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/burnchains/tests/mod.rs b/src/burnchains/tests/mod.rs index ae410d09cd..3008f01e67 100644 --- a/src/burnchains/tests/mod.rs +++ b/src/burnchains/tests/mod.rs @@ -583,7 +583,11 @@ impl TestBurnchainBlock { let parent_hdr = indexer .read_burnchain_header(self.block_height.saturating_sub(1)) .unwrap() - .unwrap(); + .expect(&format!( + "BUG: could not read block at height {}", + self.block_height.saturating_sub(1) + )); + let now = BURNCHAIN_TEST_BLOCK_TIME; let block_hash = BurnchainHeaderHash::from_bitcoin_hash( &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32).bitcoin_hash(), From 0f058f6b5bf22b995cc904fdb98da79be5496a69 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Aug 2021 10:33:24 -0400 Subject: [PATCH 065/116] fix: fix failing test due to changes in the way we handle mocked SPV headers --- src/net/inv.rs | 70 +++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 67 insertions(+), 3 deletions(-) diff --git a/src/net/inv.rs b/src/net/inv.rs index 890cdebfe9..47bb7a7b12 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -2567,6 +2567,13 @@ mod test { use super::*; + use burnchains::bitcoin::indexer::BitcoinIndexer; + use burnchains::db::BurnchainHeaderReader; + use burnchains::tests::BURNCHAIN_TEST_BLOCK_TIME; + use burnchains::BurnchainBlockHeader; + use chainstate::coordinator::tests::get_burnchain; + use deps::bitcoin::network::serialize::BitcoinHash; + #[test] fn 
peerblocksinv_has_ith_block() { let peer_inv = @@ -3098,13 +3105,70 @@ mod test { 41982, ); + let peer_1_test_path = TestPeer::make_test_path(&peer_1_config); + let peer_2_test_path = TestPeer::make_test_path(&peer_2_config); + + let mut peer_1 = TestPeer::new(peer_1_config.clone()); + let mut peer_2 = TestPeer::new(peer_2_config.clone()); + + for (test_path, burnchain) in [ + (peer_1_test_path, &mut peer_1.config.burnchain), + (peer_2_test_path, &mut peer_2.config.burnchain), + ] + .iter_mut() + { + let working_dir = get_burnchain(&test_path, None).working_dir; + + // pre-populate headers + let mut indexer = BitcoinIndexer::new_unit_test(&working_dir); + let now = BURNCHAIN_TEST_BLOCK_TIME; + + for header_height in 1..6 { + let parent_hdr = indexer + .read_burnchain_header(header_height - 1) + .unwrap() + .unwrap(); + + let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( + &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32) + .bitcoin_hash(), + ); + + let block_header = BurnchainBlockHeader { + block_height: header_height, + block_hash: block_header_hash.clone(), + parent_block_hash: parent_hdr.block_hash.clone(), + num_txs: 0, + timestamp: now, + }; + + test_debug!( + "Pre-populate block header for {}-{} ({})", + &block_header.block_hash, + &block_header.parent_block_hash, + block_header.block_height + ); + indexer.raw_store_header(block_header.clone()).unwrap(); + } + + let hdr = indexer + .read_burnchain_header(burnchain.first_block_height) + .unwrap() + .unwrap(); + burnchain.first_block_hash = hdr.block_hash; + } + peer_1_config.burnchain.first_block_height = 5; peer_2_config.burnchain.first_block_height = 5; + peer_1.config.burnchain.first_block_height = 5; + peer_2.config.burnchain.first_block_height = 5; - let burnchain = peer_1_config.burnchain.clone(); + assert_eq!( + peer_1_config.burnchain.first_block_hash, + peer_2_config.burnchain.first_block_hash + ); - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 
= TestPeer::new(peer_2_config); + let burnchain = peer_1_config.burnchain.clone(); let num_blocks = 5; let first_stacks_block_height = { From 1e210dc7ab53b5c74b7961dd7f1d374c2b9e60eb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Aug 2021 10:33:40 -0400 Subject: [PATCH 066/116] refactor: add way to directly query a test's directory --- src/net/mod.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/net/mod.rs b/src/net/mod.rs index 5a0aaa970b..38add72260 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -2323,10 +2323,7 @@ pub mod test { TestPeer::new_with_observer(config, None) } - pub fn new_with_observer( - mut config: TestPeerConfig, - observer: Option<&'a TestEventObserver>, - ) -> TestPeer<'a> { + pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = format!( "/tmp/blockstack-test-peer-{}-{}", &config.test_name, config.server_port @@ -2339,7 +2336,14 @@ pub mod test { }; fs::create_dir_all(&test_path).unwrap(); + test_path + } + pub fn new_with_observer( + mut config: TestPeerConfig, + observer: Option<&'a TestEventObserver>, + ) -> TestPeer<'a> { + let test_path = TestPeer::make_test_path(&config); let mut miner_factory = TestMinerFactory::new(); let mut miner = miner_factory.next_miner(&config.burnchain, 1, 1, AddressHashMode::SerializeP2PKH); From 362d1268355b1249638ac81cc21e4367c68e4c24 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 Sep 2021 17:31:44 -0400 Subject: [PATCH 067/116] fix: documentation -- we use burnchain header hashes in preambles --- src/net/chat.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net/chat.rs b/src/net/chat.rs index 8e44793c4c..d18df6635f 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -309,7 +309,7 @@ pub struct ConversationP2P { pub data_url: UrlString, // where does this peer's data live? Set to a 0-length string if not known. 
- // highest block height and consensus hash this peer has seen + // highest burnchain block height and burnchain block hash this peer has seen pub burnchain_tip_height: u64, pub burnchain_tip_burn_header_hash: BurnchainHeaderHash, pub burnchain_stable_tip_height: u64, From 5abe1b13cbdf301435f92cbe12f2739b5b7c5b90 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 Sep 2021 17:53:58 -0400 Subject: [PATCH 068/116] refactor: put static_block_height_to_reward_cycle into PoxConstants with the other such methods --- src/burnchains/mod.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index 07e3002581..c85692c80a 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -441,10 +441,11 @@ impl PoxConstants { first_block_height: u64, block_height: u64, ) -> Option { - if block_height < first_block_height { - return None; - } - Some((block_height - first_block_height) / (self.reward_cycle_length as u64)) + Self::static_block_height_to_reward_cycle( + block_height, + first_block_height, + self.reward_cycle_length as u64, + ) } pub fn is_in_prepare_phase(&self, first_block_height: u64, block_height: u64) -> bool { @@ -461,6 +462,20 @@ impl PoxConstants { || reward_index > ((self.reward_cycle_length - self.prepare_length) as u64) } } + + /// Returns the active reward cycle at the given burn block height + /// * `first_block_ht` - the first burn block height that the Stacks network monitored + /// * `reward_cycle_len` - the length of each reward cycle in the network. 
+ pub fn static_block_height_to_reward_cycle( + block_ht: u64, + first_block_ht: u64, + reward_cycle_len: u64, + ) -> Option { + if block_ht < first_block_ht { + return None; + } + Some((block_ht - first_block_ht) / (reward_cycle_len)) + } } /// Structure for encoding our view of the network From 4d817689da6a8809fd780002919d195f929d09d3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 Sep 2021 17:54:24 -0400 Subject: [PATCH 069/116] refactor: use the right static_block_height_to_reward_cycle --- src/clarity_vm/clarity.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index 530da05b7d..4594e1721b 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -68,6 +68,7 @@ use crate::util::strings::StacksString; use crate::vm::database::STXBalance; use crate::{ burnchains::Burnchain, + burnchains::PoxConstants, clarity_vm::database::marf::{MarfedKV, WritableMarfStore}, }; use crate::{ @@ -664,7 +665,7 @@ impl<'a> ClarityBlockConnection<'a> { let pox_rejection_fraction = self.burn_state_db.get_pox_rejection_fraction(); let v1_unlock_height = self.burn_state_db.get_v1_unlock_height(); - let pox_2_first_cycle = Burnchain::static_block_height_to_reward_cycle( + let pox_2_first_cycle = PoxConstants::static_block_height_to_reward_cycle( v1_unlock_height as u64, first_block_height as u64, pox_reward_cycle_length as u64, From 895a753bdc66f14e84e9c98838253244e2277c10 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 Sep 2021 17:54:36 -0400 Subject: [PATCH 070/116] refactor: remove compiler warning about () --- src/vm/types/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vm/types/mod.rs b/src/vm/types/mod.rs index a4e4c1d1d9..335949a7fd 100644 --- a/src/vm/types/mod.rs +++ b/src/vm/types/mod.rs @@ -399,7 +399,7 @@ impl SequenceData { } pub fn slice(self, left_position: usize, right_position: usize) -> Result { - let empty_seq = (left_position == 
right_position); + let empty_seq = left_position == right_position; let result = match self { SequenceData::Buffer(data) => { From 7105b9a449d9ab4490ea7bbcfa87bf8df53b633b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 Sep 2021 22:19:45 -0400 Subject: [PATCH 071/116] fix: broken tests arising from not using the bitcoin regtest genesis header --- src/burnchains/db.rs | 4 ++++ src/chainstate/stacks/boot/pox_2_tests.rs | 10 ++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 51c3bf5d1f..a5a1af4f96 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -971,6 +971,10 @@ impl BurnchainDB { parent_block_hash: BurnchainHeaderHash::sentinel(), }; + debug!( + "Instantiate burnchain DB at {}. First block header is {:?}", + path, &first_block_header + ); db_tx.store_burnchain_db_entry(&first_block_header)?; let first_snapshot = BlockSnapshot::initial( diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 6d77c0db9f..3b8612728c 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -517,7 +517,10 @@ fn test_pox_extend_transition_pox_2() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -894,7 +897,10 @@ fn test_delegate_extend_transition_pox_2() { // tenures start being tracked. 
let EMPTY_SORTITIONS = 25; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; From ba856caba0ff7d3830354b8bd5dd46856f937f70 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 19 Apr 2022 13:45:40 -0400 Subject: [PATCH 072/116] chore: cargo fmt --- src/burnchains/affirmation.rs | 3 +-- src/burnchains/bitcoin/indexer.rs | 2 +- src/burnchains/db.rs | 8 ++++---- src/burnchains/tests/burnchain.rs | 8 +++----- src/burnchains/tests/db.rs | 10 +++++----- src/burnchains/tests/mod.rs | 13 ++++++++++--- src/chainstate/coordinator/mod.rs | 15 +++++++++++---- src/chainstate/coordinator/tests.rs | 4 +++- src/core/mod.rs | 5 ++--- src/net/mod.rs | 4 ++-- stacks-common/src/types/chainstate.rs | 2 +- 11 files changed, 43 insertions(+), 31 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index ca8b1c7157..dad24a6efd 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -40,8 +40,7 @@ use crate::util_lib::db::Error as DBError; use crate::core::StacksEpochId; use crate::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, - StacksBlockId, + BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, StacksBlockId, }; use crate::util_lib::boot::boot_code_id; diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index 8f2e9e0d9d..4efd1470e6 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -38,11 +38,11 @@ use crate::util_lib::db::Error as DBError; use crate::burnchains::bitcoin::blocks::{BitcoinBlockDownloader, BitcoinBlockParser}; use crate::burnchains::bitcoin::BitcoinNetworkType; +use 
crate::burnchains::BurnchainBlockHeader; use crate::burnchains::Error as burnchain_error; use crate::burnchains::MagicBytes; use crate::burnchains::BLOCKSTACK_MAGIC_MAINNET; use crate::types::chainstate::BurnchainHeaderHash; -use crate::burnchains::BurnchainBlockHeader; use stacks_common::deps_common::bitcoin::blockdata::block::{BlockHeader, LoneBlockHeader}; use stacks_common::deps_common::bitcoin::network::encodable::VarInt; diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index a9d948d9c6..6f24abbaad 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -25,15 +25,15 @@ use rusqlite::{ use serde_json; use crate::burnchains::affirmation::*; -use crate::chainstate::burn::operations::LeaderBlockCommitOp; -use crate::chainstate::burn::BlockSnapshot; use crate::burnchains::Txid; use crate::burnchains::{Burnchain, BurnchainBlock, BurnchainBlockHeader, Error as BurnchainError}; use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::burn::operations::LeaderBlockCommitOp; +use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::stacks::index::MarfTrieId; use crate::util_lib::db::{ - opt_u64_to_sql, query_row, query_rows, query_row_panic, sql_pragma, sqlite_open, tx_begin_immediate, tx_busy_handler, - u64_to_sql, Error as DBError, FromColumn, FromRow, DBConn + opt_u64_to_sql, query_row, query_row_panic, query_rows, sql_pragma, sqlite_open, + tx_begin_immediate, tx_busy_handler, u64_to_sql, DBConn, Error as DBError, FromColumn, FromRow, }; use crate::chainstate::stacks::index::ClarityMarfTrieId; diff --git a/src/burnchains/tests/burnchain.rs b/src/burnchains/tests/burnchain.rs index f204382992..89173943c0 100644 --- a/src/burnchains/tests/burnchain.rs +++ b/src/burnchains/tests/burnchain.rs @@ -31,8 +31,8 @@ use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleTx}; use crate::chainstate::burn::distribution::BurnSamplePoint; use crate::chainstate::burn::operations::{ - 
leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS, BlockstackOperationType, - LeaderBlockCommitOp, LeaderKeyRegisterOp, UserBurnSupportOp, + leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS, BlockstackOperationType, LeaderBlockCommitOp, + LeaderKeyRegisterOp, UserBurnSupportOp, }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash}; use crate::chainstate::stacks::StacksPublicKey; @@ -52,9 +52,7 @@ use stacks_common::util::uint::Uint512; use stacks_common::util::vrf::VRFPrivateKey; use stacks_common::util::vrf::VRFPublicKey; -use crate::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, VRFSeed, -}; +use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, VRFSeed}; #[test] fn test_process_block_ops() { diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs index b1457fc283..ea1dfdc117 100644 --- a/src/burnchains/tests/db.rs +++ b/src/burnchains/tests/db.rs @@ -23,22 +23,22 @@ use crate::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, VRFSeed, }; -use crate::chainstate::stacks::address::StacksAddressExtensions; -use crate::burnchains::bitcoin::address::*; use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::bitcoin::address::*; use crate::burnchains::bitcoin::blocks::*; use crate::burnchains::bitcoin::*; use crate::burnchains::PoxConstants; use crate::burnchains::BLOCKSTACK_MAGIC_MAINNET; -use crate::chainstate::burn::*; use crate::chainstate::burn::operations::{ leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS, BlockstackOperationType, LeaderBlockCommitOp, }; -use crate::chainstate::stacks::*; +use crate::chainstate::burn::*; use crate::chainstate::coordinator::tests::*; +use crate::chainstate::stacks::address::StacksAddressExtensions; +use crate::chainstate::stacks::*; +use crate::util_lib::db::Error as DBError; use stacks_common::deps_common::bitcoin::blockdata::transaction::Transaction as 
BtcTx; use stacks_common::deps_common::bitcoin::network::serialize::deserialize; -use crate::util_lib::db::Error as DBError; use stacks_common::util::hash::*; use super::*; diff --git a/src/burnchains/tests/mod.rs b/src/burnchains/tests/mod.rs index 1057dd89c2..f32818e6e7 100644 --- a/src/burnchains/tests/mod.rs +++ b/src/burnchains/tests/mod.rs @@ -20,10 +20,10 @@ pub mod db; use std::collections::HashMap; +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::db::*; use crate::burnchains::Burnchain; use crate::burnchains::*; -use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::operations::BlockstackOperationType; use crate::chainstate::burn::operations::*; @@ -556,7 +556,14 @@ impl TestBurnchainBlock { new_snapshot.0 } - pub fn mine_pox<'a, T: BlockEventDispatcher, N: CoordinatorNotices, R: RewardSetProvider, CE: CostEstimator, FE: FeeEstimator>( + pub fn mine_pox< + 'a, + T: BlockEventDispatcher, + N: CoordinatorNotices, + R: RewardSetProvider, + CE: CostEstimator, + FE: FeeEstimator, + >( &self, db: &mut SortitionDB, burnchain: &Burnchain, @@ -693,7 +700,7 @@ impl TestBurnchainFork { N: CoordinatorNotices, R: RewardSetProvider, CE: CostEstimator, - FE: FeeEstimator + FE: FeeEstimator, >( &mut self, db: &mut SortitionDB, diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 89884a1d39..46e6b7eb80 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -34,8 +34,8 @@ use crate::burnchains::{ use crate::chainstate::burn::{ db::sortdb::SortitionDB, operations::leader_block_commit::{RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS}, - operations::LeaderBlockCommitOp, operations::BlockstackOperationType, + operations::LeaderBlockCommitOp, BlockSnapshot, ConsensusHash, }; use crate::chainstate::coordinator::comm::{ @@ -1411,7 +1411,11 @@ impl< } /// Process any Atlas attachment events and forward them to 
the Atlas subsystem - fn process_atlas_attachment_events(&self, block_receipt: &StacksEpochReceipt, canonical_stacks_tip_height: u64) { + fn process_atlas_attachment_events( + &self, + block_receipt: &StacksEpochReceipt, + canonical_stacks_tip_height: u64, + ) { let mut attachments_instances = HashSet::new(); for receipt in block_receipt.tx_receipts.iter() { if let TransactionOrigin::Stacks(ref transaction) = receipt.transaction { @@ -1429,7 +1433,7 @@ impl< block_receipt.header.index_block_hash(), block_receipt.header.stacks_block_height, receipt.transaction.txid(), - Some(canonical_stacks_tip_height) + Some(canonical_stacks_tip_height), ); if let Some(attachment_instance) = res { attachments_instances.insert(attachment_instance); @@ -1572,7 +1576,10 @@ impl< self.notifier.notify_stacks_block_processed(); increment_stx_blocks_processed_counter(); - self.process_atlas_attachment_events(&block_receipt, new_canonical_block_snapshot.canonical_stacks_tip_height); + self.process_atlas_attachment_events( + &block_receipt, + new_canonical_block_snapshot.canonical_stacks_tip_height, + ); let block_hash = block_receipt.header.anchored_header.block_hash(); let winner_snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 8ebdaa6936..2bd84a577f 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -284,7 +284,9 @@ fn inner_setup_states( let burnchain = get_burnchain(path, pox_consts.clone()); let epochs = match start_epoch { - StacksEpochId::Epoch2_05 => StacksEpoch::unit_test_2_05_only(burnchain.first_block_height), + StacksEpochId::Epoch2_05 => { + StacksEpoch::unit_test_2_05_only(burnchain.first_block_height) + } StacksEpochId::Epoch21 => StacksEpoch::unit_test_2_1_only(burnchain.first_block_height), _ => panic!("Cannot start in epoch 1.0 or 2.0"), }; diff --git a/src/core/mod.rs b/src/core/mod.rs index 20f5677236..1a923573cb 
100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -445,7 +445,7 @@ impl StacksEpochExtension for StacksEpoch { }, ] } - + #[cfg(test)] fn unit_test_2_05_only(first_burnchain_height: u64) -> Vec { info!( @@ -534,7 +534,7 @@ impl StacksEpochExtension for StacksEpoch { }, ] } - + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( @@ -586,7 +586,6 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> Vec { match stacks_epoch_id { diff --git a/src/net/mod.rs b/src/net/mod.rs index 330285b073..68544255c2 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -2059,13 +2059,13 @@ pub mod test { use crate::address::*; use crate::burnchains::bitcoin::address::*; + use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::bitcoin::keys::*; use crate::burnchains::bitcoin::*; use crate::burnchains::burnchain::*; use crate::burnchains::db::BurnchainDB; use crate::burnchains::tests::*; use crate::burnchains::*; - use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::chainstate::burn::db::sortdb; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::operations::*; @@ -2551,7 +2551,7 @@ pub mod test { pub fn new(config: TestPeerConfig) -> TestPeer<'a> { TestPeer::new_with_observer(config, None) } - + pub fn test_path(config: &TestPeerConfig) -> String { format!( "/tmp/stacks-node-tests/units-test-peer/{}-{}", diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 8273b4f9b2..30aa6cd0ac 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -360,7 +360,7 @@ impl BurnchainHeaderHash { // little-endian form (which is also how most libraries work). 
BurnchainHeaderHash::from_bytes_be(bitcoin_hash.as_bytes()).unwrap() } - + pub fn to_bitcoin_hash(&self) -> Sha256dHash { let mut bytes = self.0.to_vec(); bytes.reverse(); From b3f1e1583d752c37b939f1b86190bd59d7fb7b74 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 19 Apr 2022 14:40:45 -0400 Subject: [PATCH 073/116] fix: add a .reader() trait method for BurnchainIndexer, which for the BitcoinIndexer is just a call to .dup(), which returns an instance of the indexer that will only be used for accessing local state (i.e. to be consumed by a burnchain downloader thread). Add this to make it possible to preserve the indexer instance across calls to .sync_with_indexer(), such as in the node implementations. --- src/burnchains/bitcoin/indexer.rs | 4 ++++ src/burnchains/burnchain.rs | 20 +++++++++++--------- src/burnchains/indexer.rs | 5 +++++ 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index 4efd1470e6..aa1ae4a07d 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -927,6 +927,10 @@ impl BurnchainIndexer for BitcoinIndexer { fn parser(&self) -> BitcoinBlockParser { BitcoinBlockParser::new(self.runtime.network_id, self.config.magic_bytes) } + + fn reader(&self) -> BitcoinIndexer { + self.dup() + } } impl BurnchainHeaderReader for BitcoinIndexer { diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index fdc19ec991..c8d5264879 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -974,7 +974,7 @@ impl Burnchain { /// Returns new latest block height. 
pub fn sync( &mut self, - indexer: I, + indexer: &mut I, comms: &CoordinatorChannels, target_block_height_opt: Option, max_blocks_opt: Option, @@ -996,9 +996,9 @@ impl Burnchain { I: BurnchainIndexer + BurnchainHeaderReader + 'static + Send, >( &mut self, - mut indexer: I, + indexer: &mut I, ) -> Result<(BlockSnapshot, Option), burnchain_error> { - self.setup_chainstate(&mut indexer)?; + self.setup_chainstate(indexer)?; let (mut sortdb, mut burnchain_db) = self.connect_db( true, indexer.get_first_block_header_hash()?, @@ -1023,7 +1023,7 @@ impl Burnchain { let db_height = burn_chain_tip.block_height; // handle reorgs - let (sync_height, did_reorg) = Burnchain::sync_reorg(&mut indexer)?; + let (sync_height, did_reorg) = Burnchain::sync_reorg(indexer)?; if did_reorg { // a reorg happened warn!( @@ -1067,6 +1067,7 @@ impl Burnchain { let mut downloader = indexer.downloader(); let mut parser = indexer.parser(); let input_headers = indexer.read_headers(start_block + 1, end_block + 1)?; + let parser_indexer = indexer.reader(); let burnchain_config = self.clone(); @@ -1138,7 +1139,7 @@ impl Burnchain { &mut sortdb, &mut burnchain_db, &burnchain_config, - &indexer, + &parser_indexer, &burnchain_block, )?; last_processed = (tip, Some(transition)); @@ -1243,7 +1244,7 @@ impl Burnchain { /// If this method returns Err(burnchain_error::TrySyncAgain), then call this method again. 
pub fn sync_with_indexer( &mut self, - mut indexer: I, + indexer: &mut I, coord_comm: CoordinatorChannels, target_block_height_opt: Option, max_blocks_opt: Option, @@ -1252,7 +1253,7 @@ impl Burnchain { where I: BurnchainIndexer + BurnchainHeaderReader + 'static + Send, { - self.setup_chainstate(&mut indexer)?; + self.setup_chainstate(indexer)?; let (_, mut burnchain_db) = self.connect_db( true, indexer.get_first_block_header_hash()?, @@ -1268,7 +1269,7 @@ impl Burnchain { let db_height = burn_chain_tip.block_height; // handle reorgs - let (sync_height, did_reorg) = Burnchain::sync_reorg(&mut indexer)?; + let (sync_height, did_reorg) = Burnchain::sync_reorg(indexer)?; if did_reorg { // a reorg happened warn!( @@ -1378,6 +1379,7 @@ impl Burnchain { let myself = self.clone(); let input_headers = indexer.read_headers(start_block + 1, end_block + 1)?; + let parser_indexer = indexer.reader(); // TODO: don't re-process blocks. See if the block hash is already present in the burn db, // and if so, do nothing. @@ -1474,7 +1476,7 @@ impl Burnchain { let insert_start = get_epoch_time_ms(); last_processed = - Burnchain::process_block(&myself, &mut burnchain_db, &indexer, &burnchain_block)?; + Burnchain::process_block(&myself, &mut burnchain_db, &parser_indexer, &burnchain_block)?; if !coord_comm.announce_new_burn_block() { return Err(burnchain_error::CoordinatorClosed); } diff --git a/src/burnchains/indexer.rs b/src/burnchains/indexer.rs index 838cb2bc1c..f0dc6b82f0 100644 --- a/src/burnchains/indexer.rs +++ b/src/burnchains/indexer.rs @@ -80,4 +80,9 @@ pub trait BurnchainIndexer { fn downloader(&self) -> <::P as BurnchainBlockParser>::D; fn parser(&self) -> Self::P; + + /// Make an instance of the indexer to be consumed by a burnchain indexer thread, for reading + /// local state (but not downloading or parsing it). + /// This is different from `clone()` in that not all state needs to be copied. 
+ fn reader(&self) -> Self; } From c25b08eb13213b2f1221197d7cfbab79b222e821 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 19 Apr 2022 18:53:36 -0400 Subject: [PATCH 074/116] fix: all block-commits in test framework have epoch marker for 2.1 --- src/burnchains/tests/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/burnchains/tests/mod.rs b/src/burnchains/tests/mod.rs index f32818e6e7..82b7cf9ec4 100644 --- a/src/burnchains/tests/mod.rs +++ b/src/burnchains/tests/mod.rs @@ -31,6 +31,7 @@ use crate::chainstate::burn::*; use crate::chainstate::coordinator::comm::*; use crate::chainstate::coordinator::*; use crate::chainstate::stacks::*; +use crate::core::STACKS_EPOCH_2_1_MARKER; use crate::cost_estimates::{CostEstimator, FeeEstimator}; use crate::util_lib::db::*; use stacks_common::address::*; @@ -476,6 +477,7 @@ impl TestBurnchainBlock { txop.txid = Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); + txop.memo = vec![STACKS_EPOCH_2_1_MARKER << 3]; self.txs .push(BlockstackOperationType::LeaderBlockCommit(txop.clone())); From 3ef4308387b0f00d03b45fe63b5c349217fb2714 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 19 Apr 2022 18:55:33 -0400 Subject: [PATCH 075/116] fix: use original unit_tests_2_05() function for generating mocked epoch boundaries --- src/chainstate/coordinator/tests.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 2bd84a577f..32d54caab4 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -284,9 +284,7 @@ fn inner_setup_states( let burnchain = get_burnchain(path, pox_consts.clone()); let epochs = match start_epoch { - StacksEpochId::Epoch2_05 => { - StacksEpoch::unit_test_2_05_only(burnchain.first_block_height) - } + StacksEpochId::Epoch2_05 => StacksEpoch::unit_test_2_05(burnchain.first_block_height), StacksEpochId::Epoch21 => 
StacksEpoch::unit_test_2_1_only(burnchain.first_block_height), _ => panic!("Cannot start in epoch 1.0 or 2.0"), }; From 333222c37be0607de8735c6f95f3dfcfdb55df26 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 19 Apr 2022 18:55:55 -0400 Subject: [PATCH 076/116] chore: add epoch 2.1 boundary marker --- src/core/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/core/mod.rs b/src/core/mod.rs index 1a923573cb..bf89e3a4b4 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -309,6 +309,10 @@ lazy_static! { /// *or greater*. pub static STACKS_EPOCH_2_05_MARKER: u8 = 0x05; +/// Stacks 2.1 epoch marker. All block-commits in 2.1 must have a memo bitfield with this value +/// *or greater* +pub static STACKS_EPOCH_2_1_MARKER: u8 = 0x06; + #[test] fn test_ord_for_stacks_epoch() { let epochs = STACKS_EPOCHS_MAINNET.clone(); From 5878313d4f107dfd3e40f2b8cce03e100bd6fedf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 29 Apr 2022 09:51:32 -0400 Subject: [PATCH 077/116] chore: cargo fmt --- src/burnchains/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index 1d40a16369..e3e301683d 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -624,4 +624,3 @@ impl BurnchainView { self.last_burn_block_hashes = ret; } } - From 0cb8e4d8b9053f9cf44dc841102d2f2158c180c3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 22 Jul 2022 15:10:10 -0400 Subject: [PATCH 078/116] chore: cargo fmt --- src/chainstate/coordinator/tests.rs | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 4f5c73f8be..37ecfc4f64 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -2836,14 +2836,7 @@ fn test_pox_processable_block_in_different_pox_forks() { let _r = std::fs::remove_dir_all(path); let _r = std::fs::remove_dir_all(path_blinded); - let pox_consts = 
Some(PoxConstants::new( - 5, - 2, - 2, - 25, - 5, - u32::max_value(), - )); + let pox_consts = Some(PoxConstants::new(5, 2, 2, 25, 5, u32::max_value())); let b = get_burnchain(path, pox_consts.clone()); let b_blind = get_burnchain(path_blinded, pox_consts.clone()); @@ -3220,14 +3213,7 @@ fn test_pox_affirmation_fork_duel() { let _r = std::fs::remove_dir_all(path); let _r = std::fs::remove_dir_all(path_blinded); - let pox_consts = Some(PoxConstants::new( - 5, - 2, - 2, - 25, - 5, - u32::max_value(), - )); + let pox_consts = Some(PoxConstants::new(5, 2, 2, 25, 5, u32::max_value())); let b = get_burnchain(path, pox_consts.clone()); let b_blind = get_burnchain(path_blinded, pox_consts.clone()); From c98ebb5e10910c2bc7b5ca10c890c4557dfa3c60 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 24 Jul 2022 14:03:45 -0400 Subject: [PATCH 079/116] fix: tests need to use a genesis block hash for their first burnchain block hash --- src/chainstate/stacks/boot/pox_2_tests.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 9144858872..663607b816 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1450,7 +1450,10 @@ fn test_pox_2_getters() { let EMPTY_SORTITIONS = 25; let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; From 2d78424d83deccd8f9d92ab5adb0a6e00f6b98a4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Jul 2022 14:18:55 -0400 Subject: [PATCH 080/116] feat: make it so that the node operator can start using affirmation maps before 2.1 if 
overridden in the config --- src/chainstate/coordinator/mod.rs | 190 +++++++++++++++++++++--------- 1 file changed, 136 insertions(+), 54 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index f162d124bc..d19ff07095 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -161,6 +161,20 @@ pub trait BlockEventDispatcher { fn dispatch_boot_receipts(&mut self, receipts: Vec); } +pub struct ChainsCoordinatorConfig { + /// true: use affirmation maps before 2.1 + /// false: only use affirmation maps in 2.1 or later + pub always_use_affirmation_maps: bool, +} + +impl ChainsCoordinatorConfig { + pub fn new() -> ChainsCoordinatorConfig { + ChainsCoordinatorConfig { + always_use_affirmation_maps: false, + } + } +} + pub struct ChainsCoordinator< 'a, T: BlockEventDispatcher, @@ -184,6 +198,7 @@ pub struct ChainsCoordinator< reward_set_provider: R, notifier: N, atlas_config: AtlasConfig, + config: ChainsCoordinatorConfig, } #[derive(Debug)] @@ -196,6 +211,7 @@ pub enum Error { FailedToProcessSortition(BurnchainError), DBError(DBError), NotPrepareEndBlock, + NotPoXAnchorBlock, } impl From for Error { @@ -279,6 +295,7 @@ impl<'a, T: BlockEventDispatcher, CE: CostEstimator + ?Sized, FE: FeeEstimator + ChainsCoordinator<'a, T, ArcCounterCoordinatorNotices, OnChainRewardSetProvider, CE, FE> { pub fn run( + config: ChainsCoordinatorConfig, chain_state_db: StacksChainState, burnchain: Burnchain, attachments_tx: SyncSender>, @@ -332,6 +349,7 @@ impl<'a, T: BlockEventDispatcher, CE: CostEstimator + ?Sized, FE: FeeEstimator + cost_estimator, fee_estimator, atlas_config, + config, }; loop { @@ -696,7 +714,7 @@ impl< &self.burnchain, )?; debug!( - "Heaviest anchor block affirmation map is {} at height {}, current is {:?}", + "Heaviest anchor block affirmation map is `{}` at height {}, current is {:?}", &heaviest_am, canonical_burnchain_tip.block_height, &self.heaviest_anchor_block_affirmation_map @@ -1165,7 
+1183,6 @@ impl< self.burnchain_blocks_db.conn(), &self.burnchain, |anchor_block_commit, anchor_block_metadata| { - // TODO: check IBD status (issue #2474) self.has_unaffirmed_pox_anchor_block(anchor_block_commit, anchor_block_metadata) }, ) @@ -1197,10 +1214,9 @@ impl< // We halt the ancestry research as soon as we find a processed parent let mut last_processed_ancestor = loop { if let Some(found_sortition) = self.sortition_db.is_sortition_processed(&cursor)? { - test_debug!( + debug!( "Ancestor sortition {} of block {} is processed", - &found_sortition, - &cursor + &found_sortition, &cursor ); break found_sortition; } @@ -1267,10 +1283,13 @@ impl< header.block_height )); - if cur_epoch.epoch_id >= StacksEpochId::Epoch21 { + if cur_epoch.epoch_id >= StacksEpochId::Epoch21 + || self.config.always_use_affirmation_maps + { // potentially have an anchor block, but only process the next reward cycle (and // subsequent reward cycles) with it if the prepare-phase block-commits affirm its - // presence. This only gets checked in Stacks 2.1 or later. + // presence. This only gets checked in Stacks 2.1 or later (unless overridden + // in the config) // NOTE: this mutates rc_info if let Some(missing_anchor_block) = self @@ -1528,20 +1547,72 @@ impl< Ok(()) } + /// Verify that a PoX anchor block candidate is affirmed by the network. + /// Returns Ok(Some(pox_anchor)) if so. + /// Returns Ok(None) if not. + /// Returns Err(Error::NotPoXAnchorBlock) if this block got F*w confirmations but is not the + /// heaviest-confirmed burnchain block. + fn check_pox_anchor_affirmation( + &mut self, + pox_anchor: BlockHeaderHash, + winner_snapshot: &BlockSnapshot, + ) -> Result, Error> { + if BurnchainDB::is_anchor_block( + self.burnchain_blocks_db.conn(), + &winner_snapshot.burn_header_hash, + &winner_snapshot.winning_block_txid, + )? { + // affirmed? 
+ let canonical_am = self.get_canonical_affirmation_map()?; + + let commit = BurnchainDB::get_block_commit( + self.burnchain_blocks_db.conn(), + &winner_snapshot.winning_block_txid, + )? + .expect("BUG: no commit metadata in DB for existing commit"); + + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(commit.block_height) + .expect( + "BUG: accepted block commit has a block height before the first reward cycle", + ); + + if canonical_am + .at(reward_cycle) + .unwrap_or(&AffirmationMapEntry::PoxAnchorBlockAbsent) + == &AffirmationMapEntry::PoxAnchorBlockPresent + { + // yup, we're expecting this + info!("Discovered an old anchor block: {}", &pox_anchor); + return Ok(Some(pox_anchor)); + } else { + // nope -- can ignore + debug!("Discovered unaffirmed old anchor block: {}", &pox_anchor); + return Ok(None); + } + } else { + debug!("Stacks block {} received F*w confirmations but is not the heaviest-confirmed burnchain block, so treating as non-anchor block", &pox_anchor); + return Err(Error::NotPoXAnchorBlock); + } + } + /// /// Process any ready staging blocks until there are either: /// * there are no more to process /// * a PoX anchor block is processed which invalidates the current PoX fork /// - /// Returns Some(StacksBlockId) if such an anchor block is discovered, + /// Returns Some(BlockHeaderHash) if such an anchor block is discovered, /// otherwise returns None /// fn process_ready_blocks(&mut self) -> Result, Error> { - let canonical_sortition_tip = self.canonical_sortition_tip.as_ref().expect( + let canonical_sortition_tip = self.canonical_sortition_tip.clone().expect( "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", ); - let sortdb_handle = self.sortition_db.tx_handle_begin(canonical_sortition_tip)?; + let sortdb_handle = self + .sortition_db + .tx_handle_begin(&canonical_sortition_tip)?; let mut processed_blocks = self.chain_state_db .process_blocks(sortdb_handle, 1, self.dispatcher)?; @@ -1553,17 
+1624,17 @@ impl< // TODO: we should update the staging block logic to prevent // blocks like these from getting processed at all. let in_sortition_set = self.sortition_db.is_stacks_block_in_sortition_set( - canonical_sortition_tip, + &canonical_sortition_tip, &block_receipt.header.anchored_header.block_hash(), )?; if in_sortition_set { let new_canonical_block_snapshot = SortitionDB::get_block_snapshot( self.sortition_db.conn(), - canonical_sortition_tip, + &canonical_sortition_tip, )? .expect(&format!( "FAIL: could not find data for the canonical sortition {}", - canonical_sortition_tip + &canonical_sortition_tip )); let new_canonical_stacks_block = new_canonical_block_snapshot.get_canonical_stacks_block_id(); @@ -1580,7 +1651,7 @@ impl< let block_hash = block_receipt.header.anchored_header.block_hash(); let winner_snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( &self.sortition_db.index_conn(), - canonical_sortition_tip, + &canonical_sortition_tip, &block_hash, ) .expect("FAIL: could not find block snapshot for winning block hash") @@ -1623,7 +1694,7 @@ impl< // network? if let Some(pox_anchor) = self .sortition_db - .is_stacks_block_pox_anchor(&block_hash, canonical_sortition_tip)? + .is_stacks_block_pox_anchor(&block_hash, &canonical_sortition_tip)? { // what epoch is this block in? let cur_epoch = SortitionDB::get_stacks_epoch( @@ -1640,52 +1711,61 @@ impl< panic!("BUG: Snapshot predates Stacks 2.0"); } StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { - // 2.0/2.05 behavior: only consult the sortition DB - // if, just after processing the block, we _know_ that this block is a pox anchor, that means - // that sortitions have already begun processing that didn't know about this pox anchor. - // we need to trigger an unwind - info!("Discovered an old anchor block: {}", &pox_anchor); - return Ok(Some(pox_anchor)); + if self.config.always_use_affirmation_maps { + // use affirmation maps even if they're not supported yet. 
+ // if the chain is healthy, this won't cause a chain split. + match self + .check_pox_anchor_affirmation(pox_anchor, &winner_snapshot) + { + Ok(Some(pox_anchor)) => { + // yup, affirmed. unwind the sortition history at this + // block + return Ok(Some(pox_anchor)); + } + Ok(None) => { + // unaffirmed old anchor block, so no rewind is needed. + return Ok(None); + } + Err(Error::NotPoXAnchorBlock) => { + panic!("FATAL: found Stacks block that 2.0/2.05 rules would treat as an anchor block, but that 2.1+ would not"); + } + Err(e) => { + error!("Failed to check PoX affirmation: {:?}", &e); + return Err(e); + } + } + } else { + // 2.0/2.05 behavior: only consult the sortition DB + // if, just after processing the block, we _know_ that this block is a pox anchor, that means + // that sortitions have already begun processing that didn't know about this pox anchor. + // we need to trigger an unwind + info!("Discovered an old anchor block: {}", &pox_anchor); + return Ok(Some(pox_anchor)); + } } StacksEpochId::Epoch21 => { // 2.1 behavior: the anchor block must also be the // heaviest-confirmed anchor block by BTC weight, and the highest // such anchor block if there are multiple contenders. - if BurnchainDB::is_anchor_block( - self.burnchain_blocks_db.conn(), - &winner_snapshot.burn_header_hash, - &winner_snapshot.winning_block_txid, - )? { - // affirmed? - let canonical_am = self.get_canonical_affirmation_map()?; - - let commit = BurnchainDB::get_block_commit( - self.burnchain_blocks_db.conn(), - &winner_snapshot.winning_block_txid, - )? 
- .expect("BUG: no commit metadata in DB for existing commit"); - - let reward_cycle = self.burnchain.block_height_to_reward_cycle(commit.block_height) - .expect("BUG: accepted block commit has a block height before the first reward cycle"); - - if canonical_am - .at(reward_cycle) - .unwrap_or(&AffirmationMapEntry::PoxAnchorBlockAbsent) - == &AffirmationMapEntry::PoxAnchorBlockPresent - { - // yup, we're expecting this - info!("Discovered an old anchor block: {}", &pox_anchor); + match self + .check_pox_anchor_affirmation(pox_anchor, &winner_snapshot) + { + Ok(Some(pox_anchor)) => { + // yup, affirmed. unwind the sortition history at this + // block return Ok(Some(pox_anchor)); - } else { - // nope -- can ignore - debug!( - "Discovered unaffirmed old anchor block: {}", - &pox_anchor - ); + } + Ok(None) => { + // unaffirmed old anchor block, so no rewind is needed. return Ok(None); } - } else { - debug!("Stacks block {} received F*w confirmations but is not the heaviest-confirmed burnchain block, so treating as non-anchor block", &pox_anchor); + Err(Error::NotPoXAnchorBlock) => { + // keep going -- this actually isn't an anchor block + } + Err(e) => { + error!("Failed to check PoX affirmation: {:?}", &e); + return Err(e); + } } } } @@ -1694,7 +1774,9 @@ impl< } // TODO: do something with a poison result - let sortdb_handle = self.sortition_db.tx_handle_begin(canonical_sortition_tip)?; + let sortdb_handle = self + .sortition_db + .tx_handle_begin(&canonical_sortition_tip)?; // Right before a block is set to processed, the event dispatcher will emit a new block event processed_blocks = self.chain_state_db From 41602b3f0a7544dfbb9914a5bcd03b2b87c0cbc2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Jul 2022 14:19:21 -0400 Subject: [PATCH 081/116] feat: add [node].always_use_affirmation_maps (default false) --- testnet/stacks-node/src/config.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/config.rs 
b/testnet/stacks-node/src/config.rs index 9150e7e705..70eb0d4d03 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -379,6 +379,7 @@ impl Config { .pox_sync_sample_secs .unwrap_or(default_node_config.pox_sync_sample_secs), use_test_genesis_chainstate: node.use_test_genesis_chainstate, + always_use_affirmation_maps: node.always_use_affirmation_maps.unwrap_or(false), }; (node_config, node.bootstrap_node, node.deny_nodes) } @@ -1063,6 +1064,7 @@ pub struct NodeConfig { pub marf_defer_hashing: bool, pub pox_sync_sample_secs: u64, pub use_test_genesis_chainstate: Option, + pub always_use_affirmation_maps: bool, } #[derive(Clone, Debug)] @@ -1336,6 +1338,7 @@ impl NodeConfig { marf_defer_hashing: true, pox_sync_sample_secs: 30, use_test_genesis_chainstate: None, + always_use_affirmation_maps: false, } } @@ -1522,6 +1525,7 @@ pub struct NodeConfigFile { pub marf_defer_hashing: Option, pub pox_sync_sample_secs: Option, pub use_test_genesis_chainstate: Option, + pub always_use_affirmation_maps: Option, } #[derive(Clone, Deserialize)] From 2f256c8dcc9d5b789c8f1e733f92bc970626ea7b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Jul 2022 14:19:36 -0400 Subject: [PATCH 082/116] chore: plumb through config.node.always_use_affirmation_maps to ChainsCoordinator::run() --- testnet/stacks-node/src/run_loop/neon.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index df21018288..76e4e895ba 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,8 +21,8 @@ use stacks::burnchains::{Address, Burnchain}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers}; use stacks::chainstate::coordinator::{ - migrate_chainstate_dbs, BlockEventDispatcher, ChainsCoordinator, CoordinatorCommunication, - 
Error as coord_error, + migrate_chainstate_dbs, BlockEventDispatcher, ChainsCoordinator, ChainsCoordinatorConfig, + CoordinatorCommunication, Error as coord_error, }; use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::net::atlas::{AtlasConfig, Attachment, AttachmentInstance, ATTACHMENTS_CHANNEL_SIZE}; @@ -455,7 +455,12 @@ impl RunLoop { let mut cost_estimator = moved_config.make_cost_estimator(); let mut fee_estimator = moved_config.make_fee_estimator(); + let coord_config = ChainsCoordinatorConfig { + always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, + ..ChainsCoordinatorConfig::new() + }; ChainsCoordinator::run( + coord_config, chain_state_db, moved_burnchain_config, attachments_tx, From cc8714f499b26095eee38aeb02b1baea4d228497 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Jul 2022 14:45:20 -0400 Subject: [PATCH 083/116] fix: add config field to test chainscoordinator instantiation --- src/chainstate/coordinator/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index d19ff07095..1a4eabb785 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -450,6 +450,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider> ChainsCoordinator<'a, T, notifier: (), attachments_tx, atlas_config: AtlasConfig::default(false), + config: ChainsCoordinatorConfig::new(), } } } From f883aef18aab7c788861210668fe1e2066f93b39 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 28 Jul 2022 15:18:38 -0400 Subject: [PATCH 084/116] fix: retry burnchain block processing periodically if we're missing an anchor block that should exist (so if it arrives later, we will eventually reprocess the new sortitions it unblocks) --- src/chainstate/coordinator/mod.rs | 77 +++++++++++++++++++++++++++---- 1 file changed, 67 insertions(+), 10 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs 
b/src/chainstate/coordinator/mod.rs index 1a4eabb785..f63b95cc8c 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -352,19 +352,36 @@ impl<'a, T: BlockEventDispatcher, CE: CostEstimator + ?Sized, FE: FeeEstimator + config, }; + let mut missing_affirmed_anchor_block = None; + let mut last_retry_ts = 0; + let retry_interval = 5; loop { // timeout so that we handle Ctrl-C a little gracefully match comms.wait_on() { CoordinatorEvents::NEW_STACKS_BLOCK => { debug!("Received new stacks block notice"); - if let Err(e) = inst.handle_new_stacks_block() { - warn!("Error processing new stacks block: {:?}", e); + match inst.handle_new_stacks_block() { + Ok(missing_block_opt) => { + if missing_block_opt.is_some() { + missing_affirmed_anchor_block = missing_block_opt; + } + } + Err(e) => { + warn!("Error processing new stacks block: {:?}", e); + } } } CoordinatorEvents::NEW_BURN_BLOCK => { debug!("Received new burn block notice"); - if let Err(e) = inst.handle_new_burnchain_block() { - warn!("Error processing new burn block: {:?}", e); + match inst.handle_new_burnchain_block() { + Ok(missing_block_opt) => { + if missing_block_opt.is_some() { + missing_affirmed_anchor_block = missing_block_opt; + } + } + Err(e) => { + warn!("Error processing new burn block: {:?}", e); + } } } CoordinatorEvents::STOP => { @@ -373,6 +390,29 @@ impl<'a, T: BlockEventDispatcher, CE: CostEstimator + ?Sized, FE: FeeEstimator + } CoordinatorEvents::TIMEOUT => {} } + + if let Some(missing_block) = missing_affirmed_anchor_block { + if last_retry_ts + retry_interval < get_epoch_time_secs() { + // periodically retry processing sortitions in the event that a missing anchor + // block arrives + match inst.retry_burnchain_block() { + Ok(missing_block_opt) => { + if missing_block_opt.is_none() { + debug!( + "Successfully processed missing affirmed anchor block {}", + &missing_block + ); + } + + missing_affirmed_anchor_block = missing_block_opt; + } + Err(e) => { + 
warn!("Error retrying sortition-processing with missing affirmed anchor block: {:?}", e); + } + } + last_retry_ts = get_epoch_time_secs(); + } + } } } } @@ -1190,12 +1230,25 @@ impl< .map_err(|e| e.into()) } + pub fn handle_new_burnchain_block(&mut self) -> Result, Error> { + self.inner_handle_new_burnchain_block(false) + } + + pub fn retry_burnchain_block(&mut self) -> Result, Error> { + self.inner_handle_new_burnchain_block(true) + } + /// Handle a new burnchain block, optionally rolling back the canonical PoX sortition history /// and setting it up to be replayed in the event the network affirms a different history. If /// this happens, *and* if re-processing the new affirmed history is *blocked on* the /// unavailability of a PoX anchor block that *must now* exist, then return the hash of this /// anchor block. - pub fn handle_new_burnchain_block(&mut self) -> Result, Error> { + /// + /// `retry` just controls log verbosity + fn inner_handle_new_burnchain_block( + &mut self, + retry: bool, + ) -> Result, Error> { // first, see if the canonical affirmation map has changed. If so, this will wind back the // canonical sortition and stacks chain tips. self.handle_affirmation_reorg()?; @@ -1204,9 +1257,11 @@ impl< let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; let canonical_affirmation_map = self.get_canonical_affirmation_map()?; - debug!("Handle new canonical burnchain tip"; - "height" => %canonical_burnchain_tip.block_height, - "block_hash" => %canonical_burnchain_tip.block_hash.to_string()); + if !retry { + debug!("Handle new canonical burnchain tip"; + "height" => %canonical_burnchain_tip.block_height, + "block_hash" => %canonical_burnchain_tip.block_hash.to_string()); + } // Retrieve all the direct ancestors of this block with an unprocessed sortition let mut cursor = canonical_burnchain_tip.block_hash.clone(); @@ -1300,8 +1355,10 @@ impl< rc_info, )? 
{ - // missing this anchor block -- cannot proceed - info!("Burnchain block processing stops due to missing affirmed anchor block {}", &missing_anchor_block); + // missing this anchor block -- cannot proceed until we have it + if !retry { + info!("Burnchain block processing stops due to missing affirmed anchor block {}", &missing_anchor_block); + } return Ok(Some(missing_anchor_block)); } } From 1e3408e457da185dc74e2ae3e58b3e1b657aa15c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 29 Jul 2022 13:20:43 -0400 Subject: [PATCH 085/116] fix: explicitly set CARGO_MANIFEST_DIR in a bid to fix CI --- .github/actions/bitcoin-int-tests/Dockerfile.code-cov | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/actions/bitcoin-int-tests/Dockerfile.code-cov b/.github/actions/bitcoin-int-tests/Dockerfile.code-cov index ed90c1d71c..733f879b75 100644 --- a/.github/actions/bitcoin-int-tests/Dockerfile.code-cov +++ b/.github/actions/bitcoin-int-tests/Dockerfile.code-cov @@ -2,6 +2,8 @@ FROM rust:bullseye AS test WORKDIR /build +ENV CARGO_MANIFEST_DIR="$(pwd)" + RUN rustup override set nightly-2022-01-14 && \ rustup component add llvm-tools-preview && \ cargo install grcov From 462dc63eeb03677540e6b46c7ad52d6fccf12f7d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 12 Oct 2022 15:13:09 -0400 Subject: [PATCH 086/116] feat: add a 100-block download test that enables a node to build up a chainstate from 20 reward cycles --- src/net/download.rs | 260 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 260 insertions(+) diff --git a/src/net/download.rs b/src/net/download.rs index e496c0c6cf..41a7a39766 100644 --- a/src/net/download.rs +++ b/src/net/download.rs @@ -2572,8 +2572,10 @@ pub mod test { use rand::Rng; + use crate::burnchains::tests::TestMiner; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::operations::*; + use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::miner::*; use 
crate::chainstate::stacks::tests::*; use crate::chainstate::stacks::*; @@ -2584,10 +2586,13 @@ pub mod test { use crate::net::*; use crate::util_lib::strings::*; use crate::util_lib::test::*; + use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::ExecutionCost; + use clarity::vm::execute; use clarity::vm::representations::*; use stacks_common::util::hash::*; use stacks_common::util::sleep_ms; + use stacks_common::util::vrf::VRFProof; use super::*; @@ -3157,6 +3162,261 @@ pub mod test { }) } + fn make_contract_call_transaction( + miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + spending_account: &mut TestMiner, + contract_address: StacksAddress, + contract_name: &str, + function_name: &str, + args: Vec, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + ) -> StacksTransaction { + let tx_cc = { + let mut tx_cc = StacksTransaction::new( + TransactionVersion::Testnet, + spending_account.as_transaction_auth().unwrap().into(), + TransactionPayload::new_contract_call( + contract_address, + contract_name, + function_name, + args, + ) + .unwrap(), + ); + + let chain_tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let cur_nonce = chainstate + .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce(&spending_account.origin_address().unwrap().into()) + }) + }) + .unwrap(); + + test_debug!( + "Nonce of {:?} is {} at {}/{}", + &spending_account.origin_address().unwrap(), + cur_nonce, + consensus_hash, + block_hash + ); + + tx_cc.chain_id = 0x80000000; + tx_cc.auth.set_origin_nonce(cur_nonce); + tx_cc.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); + + let mut tx_signer = StacksTransactionSigner::new(&tx_cc); + spending_account.sign_as_origin(&mut tx_signer); + + let tx_cc_signed = tx_signer.get_tx().unwrap(); + + test_debug!( + "make transaction {:?} off of {:?}/{:?}: 
{:?}", + &tx_cc_signed.txid(), + consensus_hash, + block_hash, + &tx_cc_signed + ); + + spending_account.set_nonce(cur_nonce + 1); + tx_cc_signed + }; + + tx_cc + } + + #[test] + #[ignore] + pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { + // 20 reward cycles + with_timeout(600, || { + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks", + 32100, + 2, + |ref mut peer_configs| { + // build initial network topology + assert_eq!(peer_configs.len(), 2); + + peer_configs[0].connection_opts.disable_block_advertisement = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + + // peer[1] has a big initial balance + let initial_balances = vec![( + PrincipalData::from( + peer_configs[1].spending_account.origin_address().unwrap(), + ), + 1_000_000_000_000_000, + )]; + + peer_configs[0].initial_balances = initial_balances.clone(); + peer_configs[1].initial_balances = initial_balances; + }, + |num_blocks, ref mut peers| { + // build up block data to replicate + let mut block_data = vec![]; + let spending_account = &mut peers[1].config.spending_account.clone(); + + // function to make a tenure in which a the peer's miner stacks its STX + let mut make_stacking_tenure = |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option< + &StacksMicroblockHeader, + >| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => { + StacksChainState::get_genesis_header_info(chainstate.db()).unwrap() + } + Some(staging_block) => { + let ic = 
sortdb.index_conn(); + let snapshot = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &staging_block.anchored_block_hash, + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let coinbase_tx = make_coinbase_with_nonce( + miner, + parent_tip.stacks_block_height as usize, + miner.get_nonce(), + None, + ); + + let stack_tx = make_contract_call_transaction( + miner, + sortdb, + chainstate, + spending_account, + StacksAddress::burn_address(false), + "pox", + "stack-stx", + vec![ + Value::UInt(1_000_000_000_000_000 / 2), + execute("{ version: 0x00, hashbytes: 0x1000000010000000100000010000000100000001 }").unwrap().unwrap(), + Value::UInt((tip.block_height + 1) as u128), + Value::UInt(12) + ], + &parent_consensus_hash, + &parent_header_hash + ); + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx, stack_tx], + ) + .unwrap(); + + (anchored_block.0, vec![]) + }; + + for i in 0..50 { + let (mut burn_ops, stacks_block, microblocks) = if i == 1 { + peers[1].make_tenure(&mut make_stacking_tenure) + } else { + peers[1].make_default_tenure() + }; + + let (_, burn_header_hash, consensus_hash) = + 
peers[1].next_burnchain_block(burn_ops.clone()); + peers[1].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + peers[0].next_burnchain_block_raw(burn_ops); + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[1].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + block_data + }, + |_| {}, + |peer| { + // check peer health + // nothing should break + match peer.network.block_downloader { + Some(ref dl) => { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); + } + None => {} + } + + // no block advertisements (should be disabled) + let _ = peer.for_each_convo_p2p(|event_id, convo| { + let cnt = *(convo + .stats + .msg_rx_counts + .get(&StacksMessageID::BlocksAvailable) + .unwrap_or(&0)); + assert_eq!( + cnt, 0, + "neighbor event={} got {} BlocksAvailable messages", + event_id, cnt + ); + Ok(()) + }); + + true + }, + |_| true, + ); + }) + } + #[test] #[ignore] pub fn test_get_blocks_and_microblocks_5_peers_star() { From 856da0b008e2d41bf602e9dfd5e38d879f0adb9b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 17:22:57 -0400 Subject: [PATCH 087/116] fix: make it possible to make microblocks with an anchored block --- src/chainstate/stacks/miner.rs | 37 ++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/src/chainstate/stacks/miner.rs b/src/chainstate/stacks/miner.rs index c3447a074d..8425b57e8e 100644 --- a/src/chainstate/stacks/miner.rs +++ b/src/chainstate/stacks/miner.rs @@ -1209,6 +1209,11 @@ impl StacksBlockBuilder { return true; } + /// Set the block miner's private key + pub fn set_microblock_privkey(&mut self, privk: StacksPrivateKey) { + self.miner_privkey = privk; + } + /// Reset measured costs and fees pub fn reset_costs(&mut self) -> () { self.total_anchored_fees = 0; @@ -1763,7 
+1768,6 @@ impl StacksBlockBuilder { consumed } - /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases #[cfg(test)] @@ -1773,6 +1777,26 @@ impl StacksBlockBuilder { burn_dbconn: &SortitionDBConn, mut txs: Vec, ) -> Result<(StacksBlock, u64, ExecutionCost), Error> { + Self::make_anchored_block_and_microblock_from_txs( + builder, + chainstate_handle, + burn_dbconn, + txs, + vec![], + ) + .map(|(stacks_block, size, cost, _)| (stacks_block, size, cost)) + } + + /// Unconditionally build an anchored block from a list of transactions. + /// Used in test cases + #[cfg(test)] + pub fn make_anchored_block_and_microblock_from_txs( + mut builder: StacksBlockBuilder, + chainstate_handle: &StacksChainState, + burn_dbconn: &SortitionDBConn, + mut txs: Vec, + mut mblock_txs: Vec, + ) -> Result<(StacksBlock, u64, ExecutionCost, Option), Error> { debug!("Build anchored block from {} transactions", txs.len()); let (mut chainstate, _) = chainstate_handle.reopen()?; let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; @@ -1804,8 +1828,17 @@ impl StacksBlockBuilder { } let block = builder.mine_anchored_block(&mut epoch_tx); let size = builder.bytes_so_far; + + let mblock_opt = if mblock_txs.len() > 0 { + builder.micro_txs.append(&mut mblock_txs); + let mblock = builder.mine_next_microblock()?; + Some(mblock) + } else { + None + }; + let cost = builder.epoch_finish(epoch_tx); - Ok((block, size, cost)) + Ok((block, size, cost, mblock_opt)) } /// Create a block builder for mining From 3a8281340c1f7df21b910ceae4c91530b8bc7d16 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 17:23:18 -0400 Subject: [PATCH 088/116] fix: repair node download test so it produces chainstate that our download checker can validate --- src/net/download.rs | 56 +++++++++++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/src/net/download.rs b/src/net/download.rs index 
41a7a39766..a21457b197 100644 --- a/src/net/download.rs +++ b/src/net/download.rs @@ -2584,6 +2584,7 @@ pub mod test { use crate::net::relay::*; use crate::net::test::*; use crate::net::*; + use crate::stacks_common::types::PublicKey; use crate::util_lib::strings::*; use crate::util_lib::test::*; use clarity::vm::clarity::ClarityConnection; @@ -3173,6 +3174,7 @@ pub mod test { args: Vec, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, + nonce_offset: u64, ) -> StacksTransaction { let tx_cc = { let mut tx_cc = StacksTransaction::new( @@ -3195,12 +3197,14 @@ pub mod test { .get_account_nonce(&spending_account.origin_address().unwrap().into()) }) }) - .unwrap(); + .unwrap() + + nonce_offset; test_debug!( - "Nonce of {:?} is {} at {}/{}", + "Nonce of {:?} is {} (+{}) at {}/{}", &spending_account.origin_address().unwrap(), cur_nonce, + nonce_offset, consensus_hash, block_hash ); @@ -3331,29 +3335,51 @@ pub mod test { Value::UInt(12) ], &parent_consensus_hash, - &parent_header_hash + &parent_header_hash, + 0 ); - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - let builder = StacksBlockBuilder::make_block_builder( + let mblock_tx = make_contract_call_transaction( + miner, + sortdb, + chainstate, + spending_account, + StacksAddress::burn_address(false), + "pox", + "get-pox-info", + vec![], + &parent_consensus_hash, + &parent_header_hash, + 4, + ); + + let mblock_privkey = StacksPrivateKey::new(); + + let mblock_pubkey_hash_bytes = Hash160::from_data( + &StacksPublicKey::from_private(&mblock_privkey).to_bytes(), + ); + + let mut builder = StacksBlockBuilder::make_block_builder( chainstate.mainnet, &parent_tip, vrfproof, tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), + mblock_pubkey_hash_bytes, ) .unwrap(); + builder.set_microblock_privkey(mblock_privkey); - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - 
vec![coinbase_tx, stack_tx], - ) - .unwrap(); + let (anchored_block, _size, _cost, microblock_opt) = + StacksBlockBuilder::make_anchored_block_and_microblock_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx, stack_tx], + vec![mblock_tx], + ) + .unwrap(); - (anchored_block.0, vec![]) + (anchored_block, vec![microblock_opt.unwrap()]) }; for i in 0..50 { From 47c61f28a05c8c291b160d6d3aed00fbbb430617 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 17:23:46 -0400 Subject: [PATCH 089/116] fix: update network test framework so we can mine with a missing PoX anchor --- src/net/mod.rs | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/src/net/mod.rs b/src/net/mod.rs index e138527a53..6cb0cb4bb3 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -2947,6 +2947,19 @@ pub mod test { &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, true, true); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_and_missing_pox_anchor( + &mut self, + blockstack_ops: Vec, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option, + ) { self.inner_next_burnchain_block(blockstack_ops, true, true) } @@ -2954,6 +2967,19 @@ pub mod test { &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, false, false); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_raw_and_missing_pox_anchor( + &mut self, + blockstack_ops: Vec, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option, + ) { self.inner_next_burnchain_block(blockstack_ops, false, false) } @@ -2988,7 +3014,12 @@ pub mod test { mut blockstack_ops: Vec, set_consensus_hash: bool, set_burn_hash: bool, - ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option, + ) { let sortdb = 
self.sortdb.take().unwrap(); let (block_height, block_hash) = { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); @@ -3059,7 +3090,8 @@ pub mod test { (block_header.block_height, block_header_hash) }; - self.coord.handle_new_burnchain_block().unwrap(); + let missing_pox_anchor_block_hash_opt = + self.coord.handle_new_burnchain_block().unwrap(); let pox_id = { let ic = sortdb.index_conn(); @@ -3077,7 +3109,12 @@ pub mod test { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); self.sortdb = Some(sortdb); - (block_height, block_hash, tip.consensus_hash) + ( + block_height, + block_hash, + tip.consensus_hash, + missing_pox_anchor_block_hash_opt, + ) } pub fn preprocess_stacks_block(&mut self, block: &StacksBlock) -> Result { From 28b14e9a001ba399d9491451b5d770ac42a3058e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 17:24:07 -0400 Subject: [PATCH 090/116] chore: debug messages for bitcoind fork --- testnet/stacks-node/src/tests/neon_integrations.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 4e25faefc6..f38d2899ec 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1475,6 +1475,7 @@ fn bitcoind_forking_test() { // Let's create another fork, deeper let burn_header_hash_to_fork = btc_regtest_controller.get_block_hash(206); + eprintln!("Instigate 10-block deep fork"); btc_regtest_controller.invalidate_block(&burn_header_hash_to_fork); btc_regtest_controller.build_next_block(10); @@ -1484,6 +1485,7 @@ fn bitcoind_forking_test() { let account = get_account(&http_origin, &miner_account); + eprintln!("account after deep fork: {:?}", &account); // N.B. rewards mature after 2 confirmations... 
assert_eq!(account.balance, 0); assert_eq!(account.nonce, 3); From 8a0e58db0155ae439b8cb4ff77034c529f99afe5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 21:54:08 -0400 Subject: [PATCH 091/116] chore: add affirmation map rustdocs and remove elided lifetime parameters --- src/burnchains/affirmation.rs | 242 ++++++++++++++++++++++++++++++++-- 1 file changed, 232 insertions(+), 10 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index dad24a6efd..d851577c41 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -14,6 +14,223 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +/// ## The Problem +/// +/// There are currently two related design flaws in the way the Stacks blockchain deals with PoX anchor blocks: +/// +/// * If it is ever the case in which a PoX anchor block is missing, and yet somehow manages to achieve 80% or more +/// confirmations during the prepare phase, then the subsequent arrival of that anchor block will cause a _deep_ chain +/// reorg. It doesn't matter how many future blocks get mined -- if the anchor block is later revealed, it will +/// invalidate all of the blocks that did not build on it. While mining and confirming an anchor block is very costly, +/// it's not only possible, but profitable: anyone who manages to do this could hold the blockchain for ransom by +/// threatening to disclose the anchor block and invalidate all blocks after it unless they were paid not to (i.e. in +/// perpetuity). +/// +/// * If it is ever the case that not enough STX get locked for PoX to begin in reward cycle _R_, then a node that +/// processes Stacks blocks first without the anchor block in _R_ and then with the anchor block in _R_ will crash +/// because it will attempt to calculate the same sortition twice. 
This is because the same block-commits would be +/// processed in both cases -- they'd both be PoB commits. +/// +/// This subsystem fixes both problems by making the _history of anchor blocks itself_ forkable, and by implementing +/// _Nakamoto consensus_ on the anchor block history forks so that there will always be a canonical anchor block +/// history. In doing so, the Stacks blockchain now has _three_ levels of forks: the Bitcoin chain, the history of PoX +/// anchor blocks, and the history of Stacks blocks. The canonical Stacks fork is the longest history of Stacks blocks +/// that passes through the canonical history of anchor blocks which resides on the canonical Bitcoin chain. +/// +/// ## Background: Sortition Histories +/// +/// Recall that each Bitcoin block can contain block-commits that are valid only if certain anchor blocks are known to +/// the node, and invalid if other anchor blocks are known. Specifically, a block-commit can be a valid PoX +/// block-commit _only if_ the current reward cycle has an anchor block, _and_ that anchor block is known to the node. +/// Otherwise, if the block-commit does not descend from the anchor block, or there is no anchor block for this reward +/// cycle, then the block-commit can only be valid if it's a PoB block-commit. +/// +/// What this means is that there is a _set_ of sortition histories on the Bitcoin chainstate that will each yield a +/// unique history of block-commits (which in turn represent a unique set of possible Stacks forks). This set has +/// _O(2**n)_ members, where _n_ is the number of reward cycles that have anchor blocks. This is because each time a +/// new reward cycle is processed with an anchor block, there will be a sortition history that descends from it in which +/// the anchor block is known to the node, and a sortition history in which it is _not_ known. +/// +/// Which sortition history is the "true" sortition history, and how do we determine this? 
This is what this subsystem +/// addresses. +/// +/// ## Solution: Weight Sortition Histories by Miner Affirmations +/// +/// Can we deduce whether or not an anchor block _should_ exist and be known to the network, using only Bitcoin +/// chainstate? A likely anchor block's block-commit will have at least 80 confirmations in the prepare phase -- at +/// least F*w (i.e. 80) Bitcoin blocks will contain at least one block-commit that has the likely anchor block-commit as +/// an ancestor. +/// +/// Of course, there are competing block-commits in each Bitcoin block; only one will be chosen as the Stacks block. +/// But, recall that in the prepare phase of a reward cycle, all miners must burn BTC. Because miners are sending BTC +/// to the burn address, you can _compare_ the economic worth of all block-commits within a prepare-phase block. +/// Moreover, you can calculate how much BTC went into confirming a likely anchor block's block-commit. In doing so, we +/// can introduce an extra criterion for selecting the anchor block in a reward cycle: +/// +/// **The PoX anchor block for reward cycle _R_ is a Stacks block that has not yet been chosen to be an anchor block, +/// and is the highest block outside _R_'s prepare phase that has at least F*w confirmations and is confirmed by the +/// most BTC burnt.** +/// +/// This is slightly different than the definition in SIP-007. We're only looking at block-commits now. If there are +/// two or more reward-phase block-commits that got F*w confirmations, then we select the block-commit that got the most +/// BTC. If this block-commit doesn't actually correspond to a Stacks block, then there is no anchor block for the +/// reward cycle. Also, if this block-commit has been an anchor block before in some prior reward cycle, then there is +/// no anchor block for this reward cycle. 
If Stacks miners are honest, and no Stacks miner has more than 80% of the +/// mining power, then neither of these two cases arise -- Stacks miners will build Stacks blocks on top of blocks they +/// know about, and their corresponding block-commits in the prepare-phase will confirm the block-commit for an anchor +/// block the miners believe exists. +/// +/// The key insight into understanding the solution to #1805 is to see that the act of choosing an anchor block is +/// _also_ the acts of doing the following two things: +/// +/// * Picking a likely anchor block-commit is the act of _affirming_ that the anchor block is known to the network. A +/// bootstrapping node does not know which Stacks blocks actually exist, since it needs to go and actually download +/// them. But, it can examine only the Bitcoin chainstate and deduce the likely anchor block for each reward cycle. If +/// a reward cycle has a likely anchor block-commit, then we say that the set of miners who mined that prepare-phase +/// have _affirmed_ to this node and all future bootstrapping nodes that they believed that this anchor block exists. I +/// say "affirmed" because it's a weaker guarantee than "confirmed" -- the anchor block can still get lost after the +/// miners make their affirmations. +/// +/// * Picking a likely anchor block-commit is the act of affirming all of the previous affirmations that this anchor +/// block represents. An anchor block is a descendant of a history of prior anchor blocks, so miners affirming that it +/// exists by sending block-commits that confirm its block-commit is also the act of miners affirming that all of the +/// ancestor anchor blocks it confirms also exist. 
For example, if there are 4 reward cycles, and cycles 1, 2, and 3 +/// have anchor blocks, then the act of miners choosing an anchor block in reward cycle 4's prepare phase that descends +/// from the anchor block in reward cycle 3 is _also_ the act of affirming that the anchor block for reward cycle 3 +/// exists. If the anchor block for reward cycle 3 descends from the anchor block of reward cycle 1, but _not_ from the +/// anchor block in reward cycle 2, then the miners have also affirmed that the anchor block for reward cycle 1 exists. +/// Moreover, the anchor block in reward cycle 1 has been affirmed _twice_ -- both by the miners in reward cycle 3's +/// prepare phase, and the miners in reward cycle 4's prepare phase. The anchor block in reward cycle 2 has _not_ been +/// affirmed. +/// +/// The act of building anchor blocks on top of anchor blocks gives us a way to _weight_ the corresponding sortition +/// histories. An anchor block gets "heavier" as the number of descendant anchor blocks increases, and as the number of +/// reward cycles without anchor blocks increases. This is because in both cases, miners are _not_ working on an anchor +/// block history that would _invalidate_ this anchor block -- i.e. they are continuously affirming that this anchor +/// block exists. +/// +/// We can define the weight of a sortition history as the weight of its heaviest anchor block. If you want to produce +/// a sortition history that is heavier, but invalidates the last _N_ anchor blocks, you'll have to mine at least _N + +/// 1_ reward cycles. This gets us a form of Nakamoto consensus for the status of anchor blocks -- the more affirmed an +/// anchor block is, the harder it is to get it unaffirmed. By doing this, we address the first problem with PoX anchor +/// blocks: in order to hold the chain hostage, you have to _continuously_ mine reward cycles that confirm your missing +/// anchor block. 
+/// +/// ## Implementation: Affirmation Maps +/// +/// We track this information through a data structure called an **affirmation map**. An affirmation map has the +/// following methods: +/// +/// * `at(i)`: Determine the network's affirmation status of the anchor block for the _ith_ reward cycle, starting at +/// reward cycle 1 (reward cycle 0 has no anchor block, ever). The domain of `i` is defined as the set of reward cycles +/// known to the node, excluding 0, and evaluates to one of the following: +/// +/// * `p`: There is an anchor block, and it's present +/// * `a`: There is an anchor block, and it's absent +/// * `n`: There is no anchor block +/// +/// * `weight()`: This returns the maximum number of anchor blocks that descend from an anchor block this affirmation +/// map represents +/// +/// Each block-commit represents an affirmation by the miner about the state of the anchor blocks that the +/// block-commit's Stacks block confirms. When processing block-commits, the node will calculate the affirmation map +/// for each block-commit inductively as follows: +/// +/// * If the block-commit is in the prepare phase for reward cycle _R_: +/// +/// * If there is an anchor block for _R_: +/// +/// * If this commit descends from the anchor block, then its affirmation map is the same as the anchor +/// block's, plus having `at(R)` set to `p` +/// +/// * Otherwise, its affirmation map is the same as the anchor block's, plus having `at(R)` set to `a` +/// +/// * Otherwise: +/// +/// * If the parent descended from some anchor block at reward cycle _R - k_ then this commit's affirmation +/// map is the same as its parent, plus having `at(R - k)` set to `p`, plus having all `at(R - k < x < R)` +/// set to `n` if reward cycle _x_ doesn't have an anchor block, and `a` if it does. +/// +/// * Otherwise, this commit's affirmation map is defined as `at(x)` set to `n` if reward cycle _x_ doesn't +/// have an anchor block, and `a` if it does. 
+/// +/// * Otherwise: +/// +/// * If the parent descended from some anchor block in reward cycle _R - k_, then this commit's affirmation map +/// is the same as its parent, plus having `at(R - k < x < R)` set to `n` if reward cycle _x_ doesn't have an +/// anchor block, and `a` if it does. +/// +/// * Otherwise, this commit's affirmation map is defined as `at(x)` set to `n` if reward cycle _x_ doesn't have +/// an anchor block, and `a` if it does. +/// +/// Consider the example above, where we have anchor block histories 1,3,4 and 1,2. +/// +/// * A block-commit in the prepare-phase for reward cycle 4 that confirms the anchor block for reward cycle 4 would +/// have affirmation map `papp`, because it affirms that the anchor blocks for reward cycles 1, 3, and 4 exist. +/// +/// * A block-commit in the prepare-phase for reward cycle 4 that does NOT confirm the anchor block for reward cycle 4, but +/// descends from a block that descends from the anchor block in reward cycle 3, would have the affirmation map `papa`, +/// because it does NOT affirm that the anchor block for reward cycle 4 exists, but it DOES affirm that the anchor block +/// history terminating at the anchor block for reward cycle 3 exists. +/// +/// * A block-commit in the prepare-phase for reward cycle 4 that descends from a block that descends from the anchor block +/// for reward cycle 2 would have affirmation map `ppaa`, because it builds on the anchor block for reward cycle 2, but it +/// doesn't build on the anchor blocks for 3 and 4. +/// +/// * Suppose reward cycle 5 rolls around, and no anchor block is chosen at all. Then, a block in the reward +/// phase for reward cycle 5 that builds off the anchor block in reward cycle 4 would have affirmation map `pappn`. +/// Similarly, a block in reward cycle 5's reward phase that builds off of the anchor block in reward cycle 2 would have +/// affirmation map `ppaan`. 
+/// +/// (Here's a small lemma: if any affirmation map has `at(R) = n` for a given reward cycle `R`, then _all_ affirmation +/// maps will have `at(R) == n`). +/// +/// Now that we have a way to measure affirmations on anchor blocks, we can use them to deduce a canonical sortition +/// history as simply the history that represents the affirmation map with the highest `weight()` value. If there's a +/// tie, then we pick the affirmation map with the highest `i` such that `at(i) = p` (i.e. a later anchor block +/// affirmation is a stronger affirmation than an earlier one). This is always a tie-breaker, because each +/// prepare-phase either affirms or does not affirm exactly one anchor block. +/// +/// ### Using Affirmation Maps +/// +/// Each time we finish processing a reward cycle, the burnchain processor identifies the anchor block's commit and +/// updates the affirmation maps for the prepare-phase block-commits in the burnchain DB (now that an anchor block +/// decision has been made). As the DB receives subsequent reward-phase block-commits, their affirmation maps are +/// calculated using the above definition. +/// +/// Each time the chains coordinator processes a burnchain block, it sees if its view of the heaviest affirmation map +/// has changed. If so, it executes a PoX reorg like before -- it invalidates the sortitions back to the latest +/// sortition that is represented on the now-heaviest affirmation map. Unlike before, it will _re-validate_ any +/// sortitions that it has processed in the past if a _prefix_ of the now-heaviest affirmation map has been the heaviest +/// affirmation map in the past. This can arise if there are two competing sets of miners that are fighting over two +/// different sortition histories. 
In this case, it also forgets the orphaned statuses of all invalidated and +/// re-validated Stacks blocks, so they can be downloaded and applied again to the Stacks chain state (note that a +/// Stacks block will be applied at most once in any case -- it's just that it can be an orphan on one sortition +/// history, but a valid and accepted block in another). +/// +/// Because we take care to re-validate sortitions that have already been processed, we avoid the second design flaw in +/// the PoX anchor block handling -- a sortition will always be processed at most once. This is further guaranteed by +/// making sure that the consensus hash for each sortition is calculated in part from the PoX bit vector that is +/// _induced_ by the heaviest affirmation map. That is, the node's PoX ID is no longer calculated from the presence or +/// absence of anchor blocks, but instead calculated from the heaviest affirmation map as follows: +/// +/// * If `at(i)` is `p` or `n`, then bit `i` is 1 +/// * Otherwise, bit `i` is 0 +/// +/// In addition, when a late anchor block arrives and is processed by the chains coordinator, the heaviest affirmation +/// map is consulted to determine whether or not it _should_ be processed. If it's _not_ affirmed, then it is ignored. +/// +/// ## Failure Recovery +/// +/// In the event that a hidden anchor block arises, this subsystem includes a way to _override_ the heaviest affirmation +/// map for a given reward cycle. If an anchor block is missing, miners can _declare_ it missing by updating a row in +/// the burnchain DB that marks the anchor block as forever missing. This prevents a "short" (but still devastating) +/// reorg whereby an anchor block is missing for _almost_ the duration of the reward cycle -- in such a case, the +/// absence of this declaration would cause the reward cycle's blocks to all be invalidated. 
Adding this declaration, +/// and then mining an anchor block that does _not_ affirm the missing anchor block would solve this for future +/// bootstrapping nodes. +/// + use std::cmp; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::convert::{TryFrom, TryInto}; @@ -259,8 +476,8 @@ pub fn get_parent_child_reward_cycles( /// The returned vec is a vec of vecs of block-commits in block order. The ith item is a vec of /// block-commits in block order for the ith prepare-phase block (item 0 is the first prepare-phase /// block's block-commits). -pub fn read_prepare_phase_commits<'a, B: BurnchainHeaderReader>( - burnchain_tx: &BurnchainDBTransaction<'a>, +pub fn read_prepare_phase_commits( + burnchain_tx: &BurnchainDBTransaction, indexer: &B, pox_consts: &PoxConstants, first_block_height: u64, @@ -337,8 +554,8 @@ pub fn read_prepare_phase_commits<'a, B: BurnchainHeaderReader>( /// Find all referenced parent block-commits already in the burnchain DB, so we can extract their VRF seeds. /// If this method errors out, it's because it couldn't read the burnchain headers DB (or it's /// corrupted). Either way, the caller may treat this as a fatal condition. -pub fn read_parent_block_commits<'a, B: BurnchainHeaderReader>( - burnchain_tx: &BurnchainDBTransaction<'a>, +pub fn read_parent_block_commits( + burnchain_tx: &BurnchainDBTransaction, indexer: &B, prepare_phase_ops: &Vec>, ) -> Result, Error> { @@ -492,8 +709,8 @@ pub fn filter_missed_block_commits( /// Returns Some(the winning block commit, descendancy matrix, total confirmations, total burnt) if /// there's an anchor block commit. 
/// Returns None otherwise -pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( - burnchain_tx: &BurnchainDBTransaction<'a>, +pub fn find_heaviest_block_commit( + burnchain_tx: &BurnchainDBTransaction, indexer: &B, prepare_phase_ops: &Vec>, anchor_threshold: u32, @@ -553,6 +770,11 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( let mut cursor = (opdata.block_height, opdata.vtxindex); while let Some((parent_block, parent_vtxindex)) = parents.get(&cursor) { cursor = (*parent_block, *parent_vtxindex); + if let Some((block_height, vtxindex)) = ancestors.get(&cursor) { + // already processed + cursor = (*block_height, *vtxindex); + break; + } } ancestors.insert((opdata.block_height, opdata.vtxindex), (cursor.0, cursor.1)); } @@ -711,8 +933,8 @@ pub fn find_heaviest_block_commit<'a, B: BurnchainHeaderReader>( /// block-commit in the ith block in the prepare phase descends from the anchor block, or False /// if not. /// Returns only database-related errors. -pub fn find_pox_anchor_block<'a, B: BurnchainHeaderReader>( - burnchain_tx: &BurnchainDBTransaction<'a>, +pub fn find_pox_anchor_block( + burnchain_tx: &BurnchainDBTransaction, reward_cycle: u64, indexer: &B, burnchain: &Burnchain, @@ -763,7 +985,7 @@ pub fn find_pox_anchor_block<'a, B: BurnchainHeaderReader>( &anchor_block_commit.burn_header_hash, &anchor_block_commit.txid, )? 
- .expect("BUG: anchor block commit has not metadata"); + .expect("BUG: anchor block commit has no metadata"); if let Some(rc) = md.anchor_block { warn!( @@ -846,7 +1068,7 @@ pub fn update_pox_affirmation_maps( // anchor block not found for this upcoming reward cycle tx.clear_anchor_block(reward_cycle + 1)?; - // mark the prepare-phase commits that did NOT elect this next reward cycle's anchor + // mark all prepare-phase commits that did NOT elect this next reward cycle's anchor // block as NOT having descended from any anchor block (since one was not chosen) for block_ops in prepare_ops.iter() { for tx_op in block_ops.iter() { From afc5eaa350b4c8443b47ab50189f25c651cfe1b1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 21:54:29 -0400 Subject: [PATCH 092/116] fix: add missing variable --- src/burnchains/bitcoin/spv.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/burnchains/bitcoin/spv.rs b/src/burnchains/bitcoin/spv.rs index a7521dcdf9..99fb81bbcd 100644 --- a/src/burnchains/bitcoin/spv.rs +++ b/src/burnchains/bitcoin/spv.rs @@ -185,6 +185,7 @@ impl SpvClient { readwrite: readwrite, reverse_order: reverse_order, headers_db: conn, + check_txcount: true, }; if readwrite { From 76b2c31cfe961e3d387bc57c7df7851cab3aaac3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 21:54:40 -0400 Subject: [PATCH 093/116] fix: consolidate static pox constant checks and have burnchain wrap them --- src/burnchains/burnchain.rs | 42 +++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index 24b2df4152..4ed7a9f4a9 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -475,9 +475,16 @@ impl Burnchain { .block_height_to_reward_cycle(self.first_block_height, block_height) } - pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { - self.pox_constants - .is_in_prepare_phase(self.first_block_height, block_height) + 
pub fn static_block_height_to_reward_cycle( + block_height: u64, + first_block_height: u64, + reward_cycle_length: u64, + ) -> Option { + PoxConstants::static_block_height_to_reward_cycle( + block_height, + first_block_height, + reward_cycle_length, + ) } /// Is this block either the first block in a reward cycle or @@ -503,17 +510,12 @@ impl Burnchain { prepare_length: u64, block_height: u64, ) -> bool { - if block_height <= first_block_height { - // not a reward cycle start if we're the first block after genesis. - false - } else { - let effective_height = block_height - first_block_height; - let reward_index = effective_height % reward_cycle_length; - - // NOTE: first block in reward cycle is mod 1, so mod 0 is the last block in the - // prepare phase. - reward_index == 0 || reward_index > ((reward_cycle_length - prepare_length) as u64) - } + PoxConstants::static_is_in_prepare_phase( + first_block_height, + reward_cycle_length, + prepare_length, + block_height, + ) } pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { @@ -544,7 +546,7 @@ impl Burnchain { let mut byte_tail = [0u8; 16]; rng.fill_bytes(&mut byte_tail); - let tmp_path = format!("/tmp/unit-tests-{}", &to_hex(&byte_tail)); + let tmp_path = format!("/tmp/stacks-node-tests/unit-tests-{}", &to_hex(&byte_tail)); let mut ret = Burnchain::new(&tmp_path, &"bitcoin".to_string(), &"mainnet".to_string()).unwrap(); ret.first_block_height = first_block_height; @@ -888,11 +890,11 @@ impl Burnchain { if this_reward_cycle != prev_reward_cycle { // at reward cycle boundary info!( - "Update PoX affirmation maps for reward cycle {} ({}) block {} cycle-length {}", - prev_reward_cycle, - this_reward_cycle, - block_height, - burnchain.pox_constants.reward_cycle_length + "Update PoX affirmation maps for reward cycle"; + "prev_reward_cycle" => %prev_reward_cycle, + "this_reward_cycle" => %this_reward_cycle, + "block_height" => %block_height, + "cycle-length" => %burnchain.pox_constants.reward_cycle_length ); 
update_pox_affirmation_maps(burnchain_db, indexer, prev_reward_cycle, burnchain)?; } From 95e1ad7faf910abf500b5b0400e3311d365c737f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 21:55:02 -0400 Subject: [PATCH 094/116] fix: address PR feedback and remove merge artifact --- src/burnchains/db.rs | 36 +++++------------------------------- 1 file changed, 5 insertions(+), 31 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 0e6d4261b3..26212fe273 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -77,7 +77,7 @@ pub struct BlockCommitMetadata { /// if Some(..), then this block-commit is the anchor block for a reward cycle, and the /// reward cycle is represented as the inner u64. pub anchor_block: Option, - /// If Some(..), then this is the anchor block that this block-commit descends from + /// If Some(..), then this is the reward cycle which contains the anchor block that this block-commit descends from pub anchor_block_descendant: Option, } @@ -506,6 +506,8 @@ impl<'a> BurnchainDBTransaction<'a> { /// method updates the prepare-phase block-commit's affirmation map to reflect what its miner /// believes to be the state of all anchor blocks, _including_ this new reward cycle's anchor /// block. + /// Returns the ID of the affirmation map in the database on success. + /// This can be used to later look up the affirmation map. 
pub fn make_prepare_phase_affirmation_map( &self, indexer: &B, @@ -1037,7 +1039,7 @@ impl BurnchainDB { ) -> Result { let qry = "SELECT * FROM burnchain_db_block_headers ORDER BY block_height DESC, block_hash ASC LIMIT 1"; let opt = query_row(conn, qry, NO_PARAMS)?; - Ok(opt.expect("CORRUPTION: No canonical burnchain tip")) + Ok(opt.expect("CORRUPTION: Could not query highest burnchain header")) } pub fn get_canonical_chain_tip(&self) -> Result { @@ -1211,7 +1213,7 @@ impl BurnchainDB { Ok(Some((commit, commit_metadata))) } - // do NOT call directly; only use in tests + // do NOT call directly; only call directly in tests pub fn store_new_burnchain_block_ops_unchecked( &mut self, burnchain: &Burnchain, @@ -1278,7 +1280,6 @@ impl BurnchainDB { Ok(()) } -<<<<<<< HEAD pub fn get_block_commit( conn: &DBConn, txid: &Txid, @@ -1290,33 +1291,6 @@ impl BurnchainDB { test_debug!("No block-commit tx {}", &txid); Ok(None) } -======= -#[cfg(test)] -mod tests { - use crate::chainstate::stacks::address::StacksAddressExtensions; - use std::convert::TryInto; - - use crate::burnchains::bitcoin::address::*; - use crate::burnchains::bitcoin::blocks::*; - use crate::burnchains::bitcoin::*; - use crate::burnchains::PoxConstants; - use crate::burnchains::BLOCKSTACK_MAGIC_MAINNET; - use crate::chainstate::burn::*; - use crate::chainstate::stacks::address::PoxAddress; - use crate::chainstate::stacks::*; - use stacks_common::address::AddressHashMode; - use stacks_common::deps_common::bitcoin::blockdata::transaction::Transaction as BtcTx; - use stacks_common::deps_common::bitcoin::network::serialize::deserialize; - use stacks_common::util::hash::*; - - use crate::types::chainstate::StacksAddress; - - use super::*; - - fn make_tx(hex_str: &str) -> BtcTx { - let tx_bin = hex_bytes(hex_str).unwrap(); - deserialize(&tx_bin.to_vec()).unwrap() ->>>>>>> next } pub fn get_commit_in_block_at( From dd2163c9eae47ec407da30914ffb20b620314c81 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 
2022 21:55:18 -0400 Subject: [PATCH 095/116] fix: use static_is_in_prepare_phase --- src/burnchains/mod.rs | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index 351c32e0e1..6dd983ff9a 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -415,17 +415,30 @@ impl PoxConstants { } pub fn is_in_prepare_phase(&self, first_block_height: u64, block_height: u64) -> bool { + Self::static_is_in_prepare_phase( + first_block_height, + self.reward_cycle_length as u64, + self.prepare_length as u64, + block_height, + ) + } + + pub fn static_is_in_prepare_phase( + first_block_height: u64, + reward_cycle_length: u64, + prepare_length: u64, + block_height: u64, + ) -> bool { if block_height <= first_block_height { // not a reward cycle start if we're the first block after genesis. false } else { let effective_height = block_height - first_block_height; - let reward_index = effective_height % (self.reward_cycle_length as u64); + let reward_index = effective_height % reward_cycle_length; // NOTE: first block in reward cycle is mod 1, so mod 0 is the last block in the // prepare phase. 
- reward_index == 0 - || reward_index > ((self.reward_cycle_length - self.prepare_length) as u64) + reward_index == 0 || reward_index > ((reward_cycle_length - prepare_length) as u64) } } From 61311502eaf65f5f572579fa70d30cbc15dc9429 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 21:55:32 -0400 Subject: [PATCH 096/116] fix: use PoxAddress --- src/burnchains/tests/db.rs | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs index 9874fbc0d0..2c81ba64b2 100644 --- a/src/burnchains/tests/db.rs +++ b/src/burnchains/tests/db.rs @@ -406,11 +406,14 @@ fn test_classify_stack_stx() { bytes: Hash160([1; 20]), }); - let expected_reward_addr = StacksAddress::from_bitcoin_address(&BitcoinAddress { - addrtype: BitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([2; 20]), - }); + let expected_reward_addr = PoxAddress::Standard( + StacksAddress::from_bitcoin_address(&BitcoinAddress { + addrtype: BitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([2; 20]), + }), + Some(AddressHashMode::SerializeP2PKH), + ); if let BlockstackOperationType::PreStx(op) = &processed_ops_0[0] { assert_eq!(&op.output, &expected_pre_stack_addr); @@ -445,14 +448,8 @@ pub fn make_simple_block_commit( memo: vec![0], commit_outs: vec![ - StacksAddress { - version: 26, - bytes: Hash160::empty(), - }, - StacksAddress { - version: 26, - bytes: Hash160::empty(), - }, + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), ], burn_fee: 10000, @@ -474,10 +471,7 @@ pub fn make_simple_block_commit( }; if burnchain.is_in_prepare_phase(block_height) { - new_op.commit_outs = vec![StacksAddress { - version: 26, - bytes: Hash160::empty(), - }]; + new_op.commit_outs = vec![PoxAddress::standard_burn_address(false)]; } if let Some(ref op) = parent { From 
044358567b66389e908a02c1173a9b44502270c4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 21:55:47 -0400 Subject: [PATCH 097/116] fix: address PR feedback --- src/chainstate/coordinator/mod.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index e2f1344333..2bfc08f4b6 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -76,8 +76,8 @@ use crate::chainstate::stacks::index::marf::MARFOpenOpts; pub use self::comm::CoordinatorCommunication; -use stacks_common::util::get_epoch_time_secs; use super::stacks::boot::RewardSet; +use stacks_common::util::get_epoch_time_secs; pub mod comm; #[cfg(test)] @@ -1142,11 +1142,12 @@ impl< Ok(None) } - /// For unaffirmed anchor blocks, determine if they should be marked as present or absent. - fn has_unaffirmed_pox_anchor_block( + /// Determine if we have the block data for a given block-commit. + /// Used to see if we have the block data for an unaffirmed PoX anchor block + /// (hence the test_debug! macros referring to PoX anchor blocks) + fn has_stacks_block_for( &self, block_commit: LeaderBlockCommitOp, - _block_commit_metadata: BlockCommitMetadata, ) -> bool { let tip = SortitionDB::get_canonical_burn_chain_tip(self.sortition_db.conn()) .expect("BUG: failed to query chain tip from sortition DB"); @@ -1219,14 +1220,14 @@ impl< } pub fn get_canonical_affirmation_map(&self) -> Result { - // if we don't have an unaffirmed anchor block, and we're no longer in the initial block - // download, then assume that it's absent. Otherwise, if we are in the initial block - // download but we don't have it yet, assume that it's present. 
BurnchainDB::get_canonical_affirmation_map( self.burnchain_blocks_db.conn(), &self.burnchain, - |anchor_block_commit, anchor_block_metadata| { - self.has_unaffirmed_pox_anchor_block(anchor_block_commit, anchor_block_metadata) + |anchor_block_commit, _anchor_block_metadata| { + // if we don't have an unaffirmed anchor block, and we're no longer in the initial block + // download, then assume that it's absent. Otherwise, if we are in the initial block + // download but we don't have it yet, assume that it's present. + self.has_stacks_block_for(anchor_block_commit) }, ) .map_err(|e| e.into()) From 12bc29e5183b4f7a8fb562e76c2d7c596ebf1fe8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 21:55:57 -0400 Subject: [PATCH 098/116] fix: derive Clone for some structs that we need to be clonable --- src/chainstate/stacks/boot/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index eed3be64f9..64146ca2bb 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -135,13 +135,14 @@ pub fn make_contract_id(addr: &StacksAddress, name: &str) -> QualifiedContractId ) } +#[derive(Clone)] pub struct RawRewardSetEntry { pub reward_address: PoxAddress, pub amount_stacked: u128, pub stacker: Option, } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct PoxStartCycleInfo { /// This data contains the set of principals who missed a reward slot /// in this reward cycle. 
@@ -152,7 +153,7 @@ pub struct PoxStartCycleInfo { pub missed_reward_slots: Vec<(PrincipalData, u128)>, } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct RewardSet { pub rewarded_addresses: Vec, pub start_cycle_state: PoxStartCycleInfo, From 16127283b3bafef3571d44a2a1178473713e8213 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 13 Oct 2022 21:56:13 -0400 Subject: [PATCH 099/116] fix: remove unneeded `mut`s --- src/chainstate/stacks/miner.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/chainstate/stacks/miner.rs b/src/chainstate/stacks/miner.rs index b94e0d6f02..843afa1806 100644 --- a/src/chainstate/stacks/miner.rs +++ b/src/chainstate/stacks/miner.rs @@ -1933,10 +1933,10 @@ impl StacksBlockBuilder { /// Used in test cases #[cfg(test)] pub fn make_anchored_block_from_txs( - mut builder: StacksBlockBuilder, + builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, burn_dbconn: &SortitionDBConn, - mut txs: Vec, + txs: Vec, ) -> Result<(StacksBlock, u64, ExecutionCost), Error> { Self::make_anchored_block_and_microblock_from_txs( builder, From 7fa4ba24b8e8df043fca87d8d31a112d62f59da2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 15 Oct 2022 11:56:56 -0400 Subject: [PATCH 100/116] fix: cargo fmt --- src/burnchains/affirmation.rs | 75 +++++++++++++++++------------------ 1 file changed, 37 insertions(+), 38 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index d851577c41..1475c1410b 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -15,9 +15,9 @@ // along with this program. If not, see . 
/// ## The Problem -/// +/// /// There are currently two related design flaws in the way the Stacks blockchain deals with PoX anchor blocks: -/// +/// /// * If it is ever the case in which a PoX anchor block is missing, and yet somehow manages to achieve 80% or more /// confirmations during the prepare phase, then the subsequent arrival of that anchor block will cause a _deep_ chain /// reorg. It doesn't matter how many future blocks get mined -- if the anchor block is later revealed, it will @@ -25,52 +25,52 @@ /// it's not only possible, but profitable: anyone who manages to do this could hold the blockchain for ransom by /// threatening to disclose the anchor block and invaldiate all blocks after it unless they were paid not to (i.e. in /// perpetuity). -/// +/// /// * If it is ever the case that not enough STX get locked for PoX to begin in reward cycle _R_, then a node that /// processes Stacks blocks first without the anchor block in _R_ and then with the anchor block in _R_ will crash /// because it will attempt to calculate the same sortition twice. This is because the same block-commits would be /// processed in both cases -- they'd both be PoB commits. -/// +/// /// This subsystem fixes both problems by making the _history of anchor blocks itself_ forkable, and by implementing /// _Nakamoto consensus_ on the anchor block history forks so that there will always be a canonical anchor block /// history. In doing so, the Stacks blockchain now has _three_ levels of forks: the Bitcoin chain, the history of PoX /// anchor blocks, and the history of Stacks blocks. The canonical Stacks fork is the longest history of Stacks blocks /// that passes through the canonical history of anchor blocks which resides on the canonical Bitcoin chain. 
-/// +/// /// ## Background: Sortition Histories -/// +/// /// Recall that each Bitcoin block can contain block-commits that are valid only if certain anchor blocks are known to /// the node, and invalid if other anchor blocks are known. Specifically, a block-commit can be a valid PoX /// block-commit _only if_ the current reward cycle has an anchor block, _and_ that anchor block is known to the node. /// Otherwise, if the block-commit does not descend from the anchor block, or there is no anchor block for this reward /// cycle, then the block-commit can only be valid if it's a PoB block-commit. -/// +/// /// What this means is that there is a _set_ of sortition histories on the Bitcoin chainstate that will each yield a /// unique history of block-commits (which in turn represent a unique set of possible Stacks forks). This set has /// _O(2**n)_ members, where _n_ is the number of reward cycles that have anchor blocks. This is because each time a /// new reward cycle is processed with an anchor block, there will be a sortition history that descends from it in which /// the anchor block is known to the node, and a sortition history in which it is _not_ known. -/// +/// /// Which sortition history is the "true" sortition history, and how do we determine this? This is what this subsystem /// addresses. -/// +/// /// ## Solution: Weight Sortition Histories by Miner Affirmations -/// +/// /// Can we deduce whether or not an anchor block _should_ exist and be known to the network, using only Bitcoin /// chainstate? A likely anchor block's block-commit will have at least 80 confirmations in the prepare phase -- at /// least F*w (i.e. 80) Bitcoin blocks will contain at least one block-commit that has the likely anchor block-commit as /// an ancestor. -/// +/// /// Of course, there are competing block-commits in each Bitcoin block; only one will be chosen as the Stacks block. /// But, recall that in the prepare phase of a reward cycle, all miners must burn BTC. 
Because miners are sending BTC /// to the burn address, you can _compare_ the economic worth of all block-commits within a prepare-phase block. /// Moreover, you can calculate how much BTC went into confirming a likely anchor block's block-commit. In doing so, we /// can introduce an extra criterion for selecting the anchor block in a reward cycle: -/// +/// /// **The PoX anchor block for reward cycle _R_ is a Stacks block that has not yet been chosen to be an anchor block, /// and is the highest block outside _R_'s prepare phase that has at least F*w confirmations and is confirmed by the /// most BTC burnt.** -/// +/// /// This is slightly different than the definition in SIP-007. We're only looking at block-commits now. If there are /// two or more reward-phase block-commits that got F*w confirmations, then we select the block-commit that got the most /// BTC. If this block-commit doesn't actually correspond to a Stacks block, then there is no anchor block for the @@ -79,10 +79,10 @@ /// mining power, then neither of these two cases arise -- Stacks miners will build Stacks blocks on top of blocks they /// know about, and their corresponding block-commits in the prepare-phase will confirm the block-commit for an anchor /// block the miners believe exists. -/// +/// /// The key insight into understanding the solution to #1805 is to see that the act of choosing an anchor block is /// _also_ the acts of doing the following two things: -/// +/// /// * Picking a likely anchor block-commit is the act of _affirming_ that the anchor block is known to the network. A /// bootstrapping node does not know which Stacks blocks actually exist, since it needs to go and actually download /// them. But, it can examine only the Bitcoin chainstate and deduce the likely anchor block for each reward cycle. If @@ -90,7 +90,7 @@ /// have _affirmed_ to this node and all future bootstrapping nodes that they believed that this anchor block exists. 
I /// say "affirmed" because it's a weaker guarantee than "confirmed" -- the anchor block can still get lost after the /// miners make their affirmations. -/// +/// /// * Picking a likely anchor block-commit is the act of affirming all of the previous affirmations that this anchor /// block represents. An anchor block is a descendant of a history of prior anchor blocks, so miners affirming that it /// exists by sending block-commits that confirm its block-commit is also the act of miners affirming that all of the @@ -102,40 +102,40 @@ /// Moreover, the anchor block in reward cycle 1 has been affirmed _twice_ -- both by the miners in reward cycle 3's /// prepare phase, and the miners in reward cycle 4's prepare phase. The anchor block in reward cycle 2 has _not_ been /// affirmed. -/// +/// /// The act of building anchor blocks on top of anchor blocks gives us a way to _weight_ the corresponding sortition /// histories. An anchor block gets "heavier" as the number of descendant anchor blocks increases, and as the number of /// reward cycles without anchor blocks increases. This is because in both cases, miners are _not_ working on an anchor /// block history that would _invalidate_ this anchor block -- i.e. they are continuously affirming that this anchor /// block exists. -/// +/// /// We can define the weight of a sortition history as the weight of its heaviest anchor block. If you want to produce /// a sortition history that is heavier, but invalidates the last _N_ anchor blocks, you'll have to mine at least _N + /// 1_ reward cycles. This gets us a form of Nakamoto consensus for the status of anchor blocks -- the more affirmed an /// anchor block is, the harder it is to get it unaffirmed. By doing this, we address the first problem with PoX anchor /// blocks: in order to hold the chain hostage, you have to _continuously_ mine reward cycles that confirm your missing /// anchor block. 
-/// +/// /// ## Implementation: Affirmation Maps -/// +/// /// We track this information through a data structure called an **affirmation map**. An affirmation map has the /// following methods: -/// +/// /// * `at(i)`: Determine the network's affirmation status of the anchor block for the _ith_ reward cycle, starting at /// reward cycle 1 (reward cycle 0 has no anchor block, ever). The domain of `i` is defined as the set of reward cycles /// known to the node, excluding 0, and evaluates to one of the following: -/// +/// /// * `p`: There is an anchor block, and it's present /// * `a`: There is an anchor block, and it's absent /// * `n`: There is no anchor block -/// +/// /// * `weight()`: This returns the maximum number of anchor blocks that descend from an anchor block this affirmation /// map represents -/// +/// /// Each block-commit represents an affirmation by the miner about the state of the anchor blocks that the /// block-commit's Stacks block confirms. When processing block-commits, the node will calculate the affirmation map /// for each block-commit inductively as follows: -/// +/// /// * If the block-commit is in the prepare phase for reward cycle _R_: /// /// * If there is an anchor block for _R_: @@ -164,7 +164,7 @@ /// an anchor block, and `a` if it does. /// /// Consider the example above, where we have anchor block histories 1,3,4 and 1,2. -/// +/// /// * A block-commit in the prepare-phase for reward cycle 4 that confirms the anchor block for reward cycle 4 would /// have affirmation map `papp`, because it affirms that the anchor blocks for reward cycles 1, 3, and 4 exist. /// @@ -181,23 +181,23 @@ /// phase for reward cycle 5 that builds off the anchor block in reward cycle 4 would have affirmation map `pappn`. /// Similarly, a block in reward cycle 5's reward phase that builds off of the anchor block in reward cycle 2 would have /// affirmation map `ppaan`. 
-/// +/// /// (Here's a small lemma: if any affirmation map has `at(R) = n` for a given reward cycle `R`, then _all_ affirmation /// maps will have `at(R) == n`). -/// +/// /// Now that we have a way to measure affirmations on anchor blocks, we can use them to deduce a canonical sortition /// history as simply the history that represents the affirmation map with the highest `weight()` value. If there's a /// tie, then we pick the affirmation map with the highest `i` such that `at(i) = p` (i.e. a later anchor block /// affirmation is a stronger affirmation than an earlier one). This is always a tie-breaker, because each /// prepare-phase either affirms or does not affirm exactly one anchor block. -/// +/// /// ### Using Affirmation Maps -/// +/// /// Each time we finish processing a reward cycle, the burnchain processor identifies the anchor block's commit and /// updates the affirmation maps for the prepare-phase block-commits in the burnchain DB (now that an anchor block /// decision has been made). As the DB receives subsequent reward-phase block-commits, their affirmation maps are /// calculated using the above definition. -/// +/// /// Each time the chains coordinator processes a burnchain block, it sees if its view of the heaviest affirmation map /// has changed. If so, it executes a PoX reorg like before -- it invalidates the sortitions back to the latest /// sortition that is represented on the now-heaviest affirmation map. Unlike before, it will _re-validate_ any @@ -207,21 +207,21 @@ /// re-validated Stacks blocks, so they can be downloaded and applied again to the Stacks chain state (note that a /// Stacks block will be applied at most once in any case -- it's just that it can be an orphan on one sortition /// history, but a valid and accepted block in another). 
-/// +/// /// Because we take care to re-validate sortitions that have already been processed, we avoid the second design flaw in /// the PoX anchor block handling -- a sortition will always be processed at most once. This is further guaranteed by /// making sure that the consensus hash for each sortition is calculated in part from the PoX bit vector that is /// _induced_ by the heaviest affirmation map. That is, the node's PoX ID is no longer calculated from the presence or /// absence of anchor blocks, but instead calculated from the heaviest affirmation map as follows: -/// +/// /// * If `at(i)` is `p` or `n`, then bit `i` is 1 /// * Otherwise, bit `i` is 0 -/// +/// /// In addition, when a late anchor block arrives and is processed by the chains coordinator, the heaviest affirmation /// map is consulted to determine whether or not it _should_ be processed. If it's _not_ affirmed, then it is ignored. -/// +/// /// ## Failure Recovery -/// +/// /// In the event that a hidden anchor block arises, this subsystem includes a way to _override_ the heaviest affirmation /// map for a given reward cycle. If an anchor block is missing, miners can _declare_ it missing by updating a row in /// the burnchain DB that marks the anchor block as forever missing. This prevents a "short" (but still devastating) @@ -229,8 +229,7 @@ /// absence of this declaration would cause the reward cycle's blocks to all be invalidated. Adding this declaration, /// and then mining an anchor block that does _not_ affirm the missing anchor block would solve this for future /// bootstrapping nodes. 
-/// - +/// use std::cmp; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::convert::{TryFrom, TryInto}; From 253de7ce49ce36be74dfd0d7f095d0bf370b6b78 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 15 Oct 2022 11:57:07 -0400 Subject: [PATCH 101/116] fix: typo --- src/burnchains/db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 26212fe273..407e340111 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -443,7 +443,7 @@ impl<'a> BurnchainDBTransaction<'a> { commit.parent_vtxindex, )? } else { - // parnet is genesis + // parent is genesis test_debug!( "Parent block-commit of {},{},{} is the genesis commit", &commit.txid, From ebb37375c036a5292fc8f06f1c18639591581537 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 15 Oct 2022 11:57:19 -0400 Subject: [PATCH 102/116] chore: cargo fmt --- src/chainstate/coordinator/mod.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 2bfc08f4b6..03ec6fb13f 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -1145,10 +1145,7 @@ impl< /// Determine if we have the block data for a given block-commit. /// Used to see if we have the block data for an unaffirmed PoX anchor block /// (hence the test_debug! 
macros referring to PoX anchor blocks) - fn has_stacks_block_for( - &self, - block_commit: LeaderBlockCommitOp, - ) -> bool { + fn has_stacks_block_for(&self, block_commit: LeaderBlockCommitOp) -> bool { let tip = SortitionDB::get_canonical_burn_chain_tip(self.sortition_db.conn()) .expect("BUG: failed to query chain tip from sortition DB"); let ic = self.sortition_db.index_conn(); From 4ed089e3d7fa0696a55ab0282081d6d15c6cad3e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 15 Oct 2022 11:57:26 -0400 Subject: [PATCH 103/116] fix: unit tests must use the regtest bitcoin genesis header hash, not 0's --- src/chainstate/stacks/boot/pox_2_tests.rs | 25 ++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 3305762bac..5626252983 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -879,7 +879,10 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -1114,7 +1117,10 @@ fn delegate_stack_increase() { // tenures start being tracked. 
let EMPTY_SORTITIONS = 25; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -1403,7 +1409,10 @@ fn stack_increase() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -1620,7 +1629,10 @@ fn test_lock_period_invariant_extend_transition() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -3094,7 +3106,10 @@ fn test_pox_2_getters() { #[test] fn test_get_pox_addrs() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 4; // 4 reward slots burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; From 3c1a8f32272dba5a1924834908884d3b50fe2058 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 15 Oct 2022 21:52:07 -0400 Subject: [PATCH 104/116] fix: correct nonce and rewards in 
bitcoind forking test --- testnet/stacks-node/src/tests/neon_integrations.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d3c01b80c9..f7f684c71a 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1493,15 +1493,15 @@ fn bitcoind_forking_test() { eprintln!("account after deep fork: {:?}", &account); // N.B. rewards mature after 2 confirmations... - assert_eq!(account.balance, 0); - assert_eq!(account.nonce, 3); + assert_eq!(account.balance, 1020400000); + assert_eq!(account.nonce, 4); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let account = get_account(&http_origin, &miner_account); // but we're able to keep on mining - assert!(account.nonce >= 3); + assert!(account.nonce > 4); eprintln!("End of test"); channel.stop_chains_coordinator(); From 1619fe96fb11ba65280deff64934a893dd8e605c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 7 Nov 2022 18:12:02 -0500 Subject: [PATCH 105/116] docs: wordsmith some comments --- src/burnchains/affirmation.rs | 2 +- src/burnchains/db.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index 59d0b34cfb..a7ee7d9b86 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -1067,7 +1067,7 @@ pub fn update_pox_affirmation_maps( // anchor block not found for this upcoming reward cycle tx.clear_anchor_block(reward_cycle + 1)?; - // mark all prepare-phase commits that did NOT elect this next reward cycle's anchor + // mark all prepare-phase commits as NOT having descended from the next reward cycle's anchor // block as NOT having descended from any anchor block (since one was not chosen) for block_ops in prepare_ops.iter() { for tx_op in block_ops.iter() { diff --git a/src/burnchains/db.rs 
b/src/burnchains/db.rs index 0997d0f676..3b2a66fc04 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -1222,7 +1222,8 @@ impl BurnchainDB { Ok(Some((commit, commit_metadata))) } - // do NOT call directly; only call directly in tests + // do NOT call directly; only call directly in tests. + // This is only `pub` because the tests for it live in a different file. pub fn store_new_burnchain_block_ops_unchecked( &mut self, burnchain: &Burnchain, From 199d4c877b18e208c01484c757ad091aa16048bc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 7 Nov 2022 21:10:34 -0500 Subject: [PATCH 106/116] fix: failing unit tests brought on by the new way we instantiate burnchain state --- src/burnchains/affirmation.rs | 144 +++++++++++++++++----- src/burnchains/tests/db.rs | 21 ++-- src/chainstate/stacks/boot/pox_2_tests.rs | 10 +- 3 files changed, 132 insertions(+), 43 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index a7ee7d9b86..11562a560d 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -320,6 +320,18 @@ impl fmt::Debug for AffirmationMap { } } +/// The pointer to the PoX anchor block in the burnchain +pub struct PoxAnchorPtr { + /// height of the block + pub block_height: u64, + /// index in the block + pub vtxindex: u32, + /// how many tokens burnt to create it + pub burnt: u64, + /// number of confirmations it received + pub confs: u64, +} + impl AffirmationMap { pub fn new(entries: Vec) -> AffirmationMap { AffirmationMap { @@ -700,20 +712,17 @@ pub fn filter_missed_block_commits( .collect() } -/// Given a list of block-commits in the prepare-phase, find the block-commit outside the +/// Given a list of block-commits in the prepare-phase, find the block-commit pointer outside the /// prepare-phase which must be the anchor block, if it exists at all. 
This is always /// the block-commit that has the most cumulative BTC committed behind it (and the highest -/// such in the event of a tie), as well as at least `anchor_threshold` confirmations. If the anchor block -/// commit is found, return the descendancy matrix for it as well. -/// Returns Some(the winning block commit, descendancy matrix, total confirmations, total burnt) if -/// there's an anchor block commit. +/// such in the event of a tie), as well as at least `anchor_threshold` confirmations. +/// Returns the pointer into the burnchain where the anchor block-commit can be found, if it +/// exists at all. /// Returns None otherwise -pub fn find_heaviest_block_commit( - burnchain_tx: &BurnchainDBTransaction, - indexer: &B, +fn inner_find_heaviest_block_commit_ptr( prepare_phase_ops: &Vec>, anchor_threshold: u32, -) -> Result>, u64, u64)>, DBError> { +) -> Option<(PoxAnchorPtr, BTreeMap<(u64, u32), (u64, u32)>)> { // sanity check -- must be in order by block height and vtxindex for prepare_block_ops in prepare_phase_ops.iter() { let mut expected_block_height = None; @@ -810,7 +819,7 @@ pub fn find_heaviest_block_commit( if ancestor_confirmations.len() == 0 { // empty prepare phase test_debug!("Prepare-phase has no block-commits"); - return Ok(None); + return None; } // find the ancestors with at least $anchor_threshold confirmations, and pick the one that has the @@ -839,9 +848,47 @@ pub fn find_heaviest_block_commit( if most_burnt == 0 { // no anchor block possible -- no block-commit has enough confirmations test_debug!("No block-commit has enough support to be an anchor block"); - return Ok(None); + return None; } + Some(( + PoxAnchorPtr { + block_height: ancestor_block, + vtxindex: ancestor_vtxindex, + burnt: most_burnt, + confs: most_confs, + }, + ancestors, + )) +} + +/// Given a list of block-commits in the prepare-phase, find the block-commit outside the +/// prepare-phase which must be the anchor block, if it exists at all. 
This is always +/// the block-commit that has the most cumulative BTC committed behind it (and the highest +/// such in the event of a tie), as well as at least `anchor_threshold` confirmations. If the anchor block +/// commit is found, return the descendancy matrix for it as well. +/// Returns Some(the winning block commit, descendancy matrix, total confirmations, total burnt) if +/// there's an anchor block commit. +/// Returns None otherwise +pub fn find_heaviest_block_commit( + burnchain_tx: &BurnchainDBTransaction, + indexer: &B, + prepare_phase_ops: &Vec>, + anchor_threshold: u32, +) -> Result>, u64, u64)>, DBError> { + let (pox_anchor_ptr, ancestors) = + match inner_find_heaviest_block_commit_ptr(prepare_phase_ops, anchor_threshold) { + Some(ptr) => ptr, + None => { + return Ok(None); + } + }; + + let ancestor_block = pox_anchor_ptr.block_height; + let ancestor_vtxindex = pox_anchor_ptr.vtxindex; + let most_burnt = pox_anchor_ptr.burnt; + let most_confs = pox_anchor_ptr.confs; + // find the ancestor that this tip confirms let heaviest_ancestor_header = indexer .read_burnchain_headers(ancestor_block, ancestor_block + 1)? @@ -921,29 +968,13 @@ pub fn find_heaviest_block_commit( Ok(None) } -/// Find the PoX anchor block selected in a reward cycle, if it exists. This is the heaviest F*w-confirmed -/// block-commit before the prepare-phase of this reward cycle, provided that it is not already an -/// anchor block for some other reward cycle. Note that the anchor block found will be the anchor -/// block for the *next* reward cycle. -/// Returns: -/// (a) the list of block-commits, grouped by block and ordered by vtxindex, in this prepare phase -/// (b) the PoX anchor block-commit, if it exists, and -/// (c) the descendancy data for the prepare phase. Descendency[i][j] is true if the jth -/// block-commit in the ith block in the prepare phase descends from the anchor block, or False -/// if not. -/// Returns only database-related errors. 
-pub fn find_pox_anchor_block( +/// Find the valid prepare-phase ops for a given reward cycle +fn inner_find_valid_prepare_phase_commits( burnchain_tx: &BurnchainDBTransaction, reward_cycle: u64, indexer: &B, burnchain: &Burnchain, -) -> Result< - ( - Vec>, - Option<(LeaderBlockCommitOp, Vec>)>, - ), - Error, -> { +) -> Result>, Error> { let pox_consts = &burnchain.pox_constants; let first_block_height = burnchain.first_block_height; @@ -971,6 +1002,59 @@ pub fn find_pox_anchor_block( prepare_ops_valid.len() ); + Ok(prepare_ops_valid) +} + +/// Find the pointer to the PoX anchor block selected in a reward cycle, if it exists. This is the heaviest F*w-confirmed +/// block-commit before the prepare-phase of this reward cycle, provided that it is not already an +/// anchor block for some other reward cycle. Note that the anchor block found will be the anchor +/// block for the *next* reward cycle. +/// Returns a pointer to the block-commit transaction in the burnchain, if the prepare phase +/// selected an anchor block. +/// Returns None if not. +pub fn find_pox_anchor_block_ptr( + burnchain_tx: &BurnchainDBTransaction, + reward_cycle: u64, + indexer: &B, + burnchain: &Burnchain, +) -> Result, Error> { + let prepare_ops_valid = + inner_find_valid_prepare_phase_commits(burnchain_tx, reward_cycle, indexer, burnchain)?; + Ok(inner_find_heaviest_block_commit_ptr( + &prepare_ops_valid, + burnchain.pox_constants.anchor_threshold, + ) + .map(|(ptr, _)| ptr)) +} + +/// Find the PoX anchor block selected in a reward cycle, if it exists. This is the heaviest F*w-confirmed +/// block-commit before the prepare-phase of this reward cycle, provided that it is not already an +/// anchor block for some other reward cycle. Note that the anchor block found will be the anchor +/// block for the *next* reward cycle. 
+/// Returns: +/// (a) the list of block-commits, grouped by block and ordered by vtxindex, in this prepare phase +/// (b) the PoX anchor block-commit, if it exists, and +/// (c) the descendancy data for the prepare phase. Descendency[i][j] is true if the jth +/// block-commit in the ith block in the prepare phase descends from the anchor block, or False +/// if not. +/// Returns only database-related errors. +pub fn find_pox_anchor_block( + burnchain_tx: &BurnchainDBTransaction, + reward_cycle: u64, + indexer: &B, + burnchain: &Burnchain, +) -> Result< + ( + // (a) prepare-phase block-commits + Vec>, + // (b) PoX anchor block commit (if found) + // (c) descendancy matrix + Option<(LeaderBlockCommitOp, Vec>)>, + ), + Error, +> { + let prepare_ops_valid = + inner_find_valid_prepare_phase_commits(burnchain_tx, reward_cycle, indexer, burnchain)?; let anchor_block_and_descendancy_opt = find_heaviest_block_commit( &burnchain_tx, indexer, diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs index bb12642d88..058c151557 100644 --- a/src/burnchains/tests/db.rs +++ b/src/burnchains/tests/db.rs @@ -31,6 +31,7 @@ use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::chainstate::stacks::*; use crate::core::StacksEpochId; +use crate::core::BITCOIN_REGTEST_FIRST_BLOCK_HASH; use crate::types::chainstate::StacksAddress; use crate::util_lib::db::Error as DBError; use stacks_common::address::AddressHashMode; @@ -65,9 +66,8 @@ fn make_tx(hex_str: &str) -> BtcTx { #[test] fn test_store_and_fetch() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 321; - let first_height = 1; + let first_bhh = BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); + let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); @@ -79,7 +79,7 @@ fn test_store_and_fetch() { let 
first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); assert_eq!(&first_block_header.block_hash, &first_bhh); assert_eq!(&first_block_header.block_height, &first_height); - assert_eq!(&first_block_header.timestamp, &first_timestamp); + assert_eq!(&first_block_header.timestamp, &0); assert_eq!( &first_block_header.parent_block_hash, &BurnchainHeaderHash::sentinel() @@ -188,9 +188,8 @@ fn test_store_and_fetch() { #[test] fn test_classify_stack_stx() { - let first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 321; - let first_height = 1; + let first_bhh = BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); + let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); @@ -202,7 +201,7 @@ fn test_classify_stack_stx() { let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); assert_eq!(&first_block_header.block_hash, &first_bhh); assert_eq!(&first_block_header.block_height, &first_height); - assert_eq!(&first_block_header.timestamp, &first_timestamp); + assert_eq!(&first_block_header.timestamp, &0); assert_eq!( &first_block_header.parent_block_hash, &BurnchainHeaderHash::sentinel() @@ -501,7 +500,7 @@ pub fn make_simple_block_commit( #[test] fn test_get_commit_at() { - let first_bhh = BurnchainHeaderHash([0; 32]); + let first_bhh = BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); let first_timestamp = 0; let first_height = 1; @@ -598,7 +597,7 @@ fn test_get_commit_at() { #[test] fn test_get_set_check_anchor_block() { - let first_bhh = BurnchainHeaderHash([0; 32]); + let first_bhh = BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); let first_timestamp = 0; let first_height = 1; @@ -683,7 +682,7 @@ fn test_get_set_check_anchor_block() { #[test] fn test_update_block_descendancy() { - let first_bhh = BurnchainHeaderHash([0; 32]); + let first_bhh = 
BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); let first_timestamp = 0; let first_height = 1; diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 656903b6e0..dce03bf837 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -3392,7 +3392,10 @@ fn test_get_pox_addrs() { #[test] fn test_stack_with_segwit() { - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 4; // 4 reward slots burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; @@ -3708,7 +3711,10 @@ fn test_pox_2_delegate_stx_addr_validation() { let EMPTY_SORTITIONS = 25; let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1; From 0978eff735cda0a2ed7a7cf35106606c291ddcd6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 8 Nov 2022 15:42:38 -0500 Subject: [PATCH 107/116] fix: set chains-coordinator thread stack size to the same as the miner thread --- testnet/stacks-node/src/run_loop/neon.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index a76ba635d6..b2b1259973 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -36,6 +36,7 @@ use crate::monitoring::start_serving_monitoring_metrics; use crate::neon_node::Globals; use 
crate::neon_node::StacksNode; use crate::neon_node::RELAYER_MAX_BUFFER; +use crate::neon_node::BLOCK_PROCESSOR_STACK_SIZE; use crate::node::use_test_genesis_chainstate; use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; use crate::{ @@ -495,6 +496,7 @@ impl RunLoop { "chains-coordinator-{}", &moved_config.node.rpc_bind )) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { let mut cost_estimator = moved_config.make_cost_estimator(); let mut fee_estimator = moved_config.make_fee_estimator(); From 8f7983ead2d65a0054008c2f6ab5b1824ed70a11 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 9 Nov 2022 10:57:43 -0500 Subject: [PATCH 108/116] fix: multiple imports merge artifact --- testnet/stacks-node/src/run_loop/neon.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index bd72cc7407..df0be2f926 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -37,7 +37,6 @@ use crate::neon_node::Globals; use crate::neon_node::StacksNode; use crate::neon_node::BLOCK_PROCESSOR_STACK_SIZE; use crate::neon_node::RELAYER_MAX_BUFFER; -use crate::neon_node::BLOCK_PROCESSOR_STACK_SIZE; use crate::node::use_test_genesis_chainstate; use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; use crate::{ From f4d10bcd900195edfd525e7b9f344c84a679a257 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Nov 2022 18:32:40 -0500 Subject: [PATCH 109/116] refactor: put tests in the right place --- src/burnchains/db.rs | 288 ------------------------------------ src/burnchains/tests/db.rs | 292 +++++++++++++++++++++++++++++++++++++ 2 files changed, 292 insertions(+), 288 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 859547b7b6..3b2a66fc04 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -1532,292 +1532,4 @@ impl BurnchainDB { Ok(heaviest_am) } - - #[test] - fn test_classify_delegate_stx() { - let 
first_bhh = BurnchainHeaderHash([0; 32]); - let first_timestamp = 321; - let first_height = 1; - - let mut burnchain_db = - BurnchainDB::connect(":memory:", first_height, &first_bhh, first_timestamp, true) - .unwrap(); - - let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = PoxConstants::test_default(); - - let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); - assert_eq!(&first_block_header.block_hash, &first_bhh); - assert_eq!(&first_block_header.block_height, &first_height); - assert_eq!(&first_block_header.timestamp, &first_timestamp); - assert_eq!( - &first_block_header.parent_block_hash, - &BurnchainHeaderHash::sentinel() - ); - - let canon_hash = BurnchainHeaderHash([1; 32]); - - let canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( - 500, - &canon_hash, - &first_bhh, - &vec![], - 485, - )); - let ops = burnchain_db - .store_new_burnchain_block(&burnchain, &canonical_block, StacksEpochId::Epoch21) - .unwrap(); - assert_eq!(ops.len(), 0); - - // let's mine a block with a pre-stx tx, and an invalid delegate-stx tx, - // the delegate-stx tx should _fail_ to verify, because there's it - // doesn't set the txid of the pre-stx in its input. 
- - let parser = BitcoinBlockParser::new(BitcoinNetworkType::Testnet, BLOCKSTACK_MAGIC_MAINNET); - - let pre_delegate_stx_0_txid = Txid([5; 32]); - let pre_delegate_stx_0 = BitcoinTransaction { - txid: pre_delegate_stx_0_txid.clone(), - vtxindex: 0, - opcode: Opcodes::PreStx as u8, - data: vec![0; 80], - data_amt: 0, - inputs: vec![BitcoinTxInputStructured { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 1), - } - .into()], - outputs: vec![BitcoinTxOutput { - units: 10, - address: LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - } - .into(), - }], - }; - - // Set up the data field for the delegate stx transactions - let mut data = vec![1; 80]; - // Make it so that reward_addr_index = Some(1) - for i in 17..20 { - data[i] = 0; - } - - // this one will not have a corresponding pre_stx tx. - let delegate_stx_0 = BitcoinTransaction { - txid: Txid([4; 32]), - vtxindex: 1, - opcode: Opcodes::DelegateStx as u8, - data: data.clone(), - data_amt: 0, - inputs: vec![BitcoinTxInputStructured { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 1), - } - .into()], - outputs: vec![BitcoinTxOutput { - units: 10, - address: LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - } - .into(), - }], - }; - - // this one will have a corresponding pre_stx tx. 
- let delegate_stx_0_second_attempt = BitcoinTransaction { - txid: Txid([4; 32]), - vtxindex: 2, - opcode: Opcodes::DelegateStx as u8, - data: data.clone(), - data_amt: 0, - inputs: vec![BitcoinTxInputStructured { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (pre_delegate_stx_0_txid.clone(), 1), - } - .into()], - outputs: vec![ - BitcoinTxOutput { - units: 10, - address: LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([2; 20]), - } - .into(), - }, - BitcoinTxOutput { - units: 10, - address: LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - } - .into(), - }, - ], - }; - - // this one won't have a corresponding pre_stx tx. - let delegate_stx_1 = BitcoinTransaction { - txid: Txid([3; 32]), - vtxindex: 3, - opcode: Opcodes::DelegateStx as u8, - data: data.clone(), - data_amt: 0, - inputs: vec![BitcoinTxInputStructured { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 1), - } - .into()], - outputs: vec![BitcoinTxOutput { - units: 10, - address: LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - } - .into(), - }], - }; - - // This one won't use the correct output from the pre stx transaction. - // It tries to use the second output from the pre stx tx, which DNE. 
- let delegate_stx_2 = BitcoinTransaction { - txid: Txid([8; 32]), - vtxindex: 4, - opcode: Opcodes::DelegateStx as u8, - data: data.clone(), - data_amt: 0, - inputs: vec![BitcoinTxInputStructured { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (pre_delegate_stx_0_txid.clone(), 2), - } - .into()], - outputs: vec![BitcoinTxOutput { - units: 10, - address: LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - } - .into(), - }], - }; - - let ops_0 = vec![pre_delegate_stx_0, delegate_stx_0]; - - let ops_1 = vec![ - delegate_stx_1, - delegate_stx_0_second_attempt, - delegate_stx_2, - ]; - - let block_height_0 = 501; - let block_hash_0 = BurnchainHeaderHash([2; 32]); - let block_height_1 = 502; - let block_hash_1 = BurnchainHeaderHash([3; 32]); - - let block_0 = BurnchainBlock::Bitcoin(BitcoinBlock::new( - block_height_0, - &block_hash_0, - &first_bhh, - &ops_0, - 350, - )); - - let block_1 = BurnchainBlock::Bitcoin(BitcoinBlock::new( - block_height_1, - &block_hash_1, - &block_hash_0, - &ops_1, - 360, - )); - - let processed_ops_0 = burnchain_db - .store_new_burnchain_block(&burnchain, &block_0, StacksEpochId::Epoch21) - .unwrap(); - - assert_eq!( - processed_ops_0.len(), - 1, - "Only pre_delegate_stx op should have been accepted" - ); - - let processed_ops_1 = burnchain_db - .store_new_burnchain_block(&burnchain, &block_1, StacksEpochId::Epoch21) - .unwrap(); - - assert_eq!( - processed_ops_1.len(), - 1, - "Only one delegate_stx op should have been accepted" - ); - - let expected_pre_delegate_addr = StacksAddress::from_legacy_bitcoin_address( - &LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - } - .into(), - ); - - let expected_delegate_addr = PoxAddress::Standard( - StacksAddress::from_legacy_bitcoin_address( - &LegacyBitcoinAddress { - 
addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([2; 20]), - } - .into(), - ), - Some(AddressHashMode::SerializeP2PKH), - ); - - let expected_reward_addr = Some(( - 1, - PoxAddress::Standard( - StacksAddress::from_legacy_bitcoin_address( - &LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - } - .into(), - ), - Some(AddressHashMode::SerializeP2PKH), - ), - )); - - if let BlockstackOperationType::PreStx(op) = &processed_ops_0[0] { - assert_eq!(&op.output, &expected_pre_delegate_addr); - } else { - panic!("EXPECTED to parse a pre delegate stx op"); - } - - if let BlockstackOperationType::DelegateStx(op) = &processed_ops_1[0] { - assert_eq!(&op.sender, &expected_pre_delegate_addr); - assert_eq!(op.delegated_ustx, u128::from_be_bytes([1; 16])); - assert_eq!(op.delegate_to, StacksAddress::new(22, Hash160([2u8; 20]))); - assert_eq!(&op.reward_addr, &expected_reward_addr); - assert_eq!(op.until_burn_height, Some(u64::from_be_bytes([1; 8]))); - } else { - panic!("EXPECTED to parse a delegate stx op"); - } - } } diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs index 058c151557..93e348d6d1 100644 --- a/src/burnchains/tests/db.rs +++ b/src/burnchains/tests/db.rs @@ -798,3 +798,295 @@ fn test_update_block_descendancy() { assert_eq!(invalid_am_id.unwrap(), 0); } } + +#[test] +fn test_classify_delegate_stx() { + let first_bhh = BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); + let first_timestamp = 321; + let first_height = 0; + + let mut burnchain = Burnchain::regtest(":memory:"); + let mut burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); + + burnchain.pox_constants = PoxConstants::test_default(); + + let first_block_header = burnchain_db.get_canonical_chain_tip().unwrap(); + assert_eq!(&first_block_header.block_hash, &first_bhh); + 
assert_eq!(&first_block_header.block_height, &first_height); + assert_eq!( + &first_block_header.parent_block_hash, + &BurnchainHeaderHash::sentinel() + ); + + let canon_hash = BurnchainHeaderHash([1; 32]); + + let canonical_block = BurnchainBlock::Bitcoin(BitcoinBlock::new( + 500, + &canon_hash, + &first_bhh, + &vec![], + 485, + )); + let headers = vec![first_block_header.clone()]; + + let ops = burnchain_db + .store_new_burnchain_block( + &burnchain, + &headers, + &canonical_block, + StacksEpochId::Epoch21, + ) + .unwrap(); + assert_eq!(ops.len(), 0); + + // let's mine a block with a pre-stx tx, and an invalid delegate-stx tx, + // the delegate-stx tx should _fail_ to verify, because there's it + // doesn't set the txid of the pre-stx in its input. + + let parser = BitcoinBlockParser::new(BitcoinNetworkType::Testnet, BLOCKSTACK_MAGIC_MAINNET); + + let pre_delegate_stx_0_txid = Txid([5; 32]); + let pre_delegate_stx_0 = BitcoinTransaction { + txid: pre_delegate_stx_0_txid.clone(), + vtxindex: 0, + opcode: Opcodes::PreStx as u8, + data: vec![0; 80], + data_amt: 0, + inputs: vec![BitcoinTxInputStructured { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (Txid([0; 32]), 1), + } + .into()], + outputs: vec![BitcoinTxOutput { + units: 10, + address: LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + } + .into(), + }], + }; + + // Set up the data field for the delegate stx transactions + let mut data = vec![1; 80]; + // Make it so that reward_addr_index = Some(1) + for i in 17..20 { + data[i] = 0; + } + + // this one will not have a corresponding pre_stx tx. 
+ let delegate_stx_0 = BitcoinTransaction { + txid: Txid([4; 32]), + vtxindex: 1, + opcode: Opcodes::DelegateStx as u8, + data: data.clone(), + data_amt: 0, + inputs: vec![BitcoinTxInputStructured { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (Txid([0; 32]), 1), + } + .into()], + outputs: vec![BitcoinTxOutput { + units: 10, + address: LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + } + .into(), + }], + }; + + // this one will have a corresponding pre_stx tx. + let delegate_stx_0_second_attempt = BitcoinTransaction { + txid: Txid([4; 32]), + vtxindex: 2, + opcode: Opcodes::DelegateStx as u8, + data: data.clone(), + data_amt: 0, + inputs: vec![BitcoinTxInputStructured { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (pre_delegate_stx_0_txid.clone(), 1), + } + .into()], + outputs: vec![ + BitcoinTxOutput { + units: 10, + address: LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([2; 20]), + } + .into(), + }, + BitcoinTxOutput { + units: 10, + address: LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + } + .into(), + }, + ], + }; + + // this one won't have a corresponding pre_stx tx. 
+ let delegate_stx_1 = BitcoinTransaction { + txid: Txid([3; 32]), + vtxindex: 3, + opcode: Opcodes::DelegateStx as u8, + data: data.clone(), + data_amt: 0, + inputs: vec![BitcoinTxInputStructured { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (Txid([0; 32]), 1), + } + .into()], + outputs: vec![BitcoinTxOutput { + units: 10, + address: LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + } + .into(), + }], + }; + + // This one won't use the correct output from the pre stx transaction. + // It tries to use the second output from the pre stx tx, which DNE. + let delegate_stx_2 = BitcoinTransaction { + txid: Txid([8; 32]), + vtxindex: 4, + opcode: Opcodes::DelegateStx as u8, + data: data.clone(), + data_amt: 0, + inputs: vec![BitcoinTxInputStructured { + keys: vec![], + num_required: 0, + in_type: BitcoinInputType::Standard, + tx_ref: (pre_delegate_stx_0_txid.clone(), 2), + } + .into()], + outputs: vec![BitcoinTxOutput { + units: 10, + address: LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + } + .into(), + }], + }; + + let ops_0 = vec![pre_delegate_stx_0, delegate_stx_0]; + + let ops_1 = vec![ + delegate_stx_1, + delegate_stx_0_second_attempt, + delegate_stx_2, + ]; + + let block_height_0 = 501; + let block_hash_0 = BurnchainHeaderHash([2; 32]); + let block_height_1 = 502; + let block_hash_1 = BurnchainHeaderHash([3; 32]); + + let block_0 = BurnchainBlock::Bitcoin(BitcoinBlock::new( + block_height_0, + &block_hash_0, + &first_bhh, + &ops_0, + 350, + )); + + let block_1 = BurnchainBlock::Bitcoin(BitcoinBlock::new( + block_height_1, + &block_hash_1, + &block_hash_0, + &ops_1, + 360, + )); + + let processed_ops_0 = burnchain_db + .store_new_burnchain_block(&burnchain, &headers, &block_0, StacksEpochId::Epoch21) + .unwrap(); + + assert_eq!( + 
processed_ops_0.len(), + 1, + "Only pre_delegate_stx op should have been accepted" + ); + + let processed_ops_1 = burnchain_db + .store_new_burnchain_block(&burnchain, &headers, &block_1, StacksEpochId::Epoch21) + .unwrap(); + + assert_eq!( + processed_ops_1.len(), + 1, + "Only one delegate_stx op should have been accepted" + ); + + let expected_pre_delegate_addr = StacksAddress::from_legacy_bitcoin_address( + &LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + } + .into(), + ); + + let expected_delegate_addr = PoxAddress::Standard( + StacksAddress::from_legacy_bitcoin_address( + &LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([2; 20]), + } + .into(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_reward_addr = Some(( + 1, + PoxAddress::Standard( + StacksAddress::from_legacy_bitcoin_address( + &LegacyBitcoinAddress { + addrtype: LegacyBitcoinAddressType::PublicKeyHash, + network_id: BitcoinNetworkType::Mainnet, + bytes: Hash160([1; 20]), + } + .into(), + ), + Some(AddressHashMode::SerializeP2PKH), + ), + )); + + if let BlockstackOperationType::PreStx(op) = &processed_ops_0[0] { + assert_eq!(&op.output, &expected_pre_delegate_addr); + } else { + panic!("EXPECTED to parse a pre delegate stx op"); + } + + if let BlockstackOperationType::DelegateStx(op) = &processed_ops_1[0] { + assert_eq!(&op.sender, &expected_pre_delegate_addr); + assert_eq!(op.delegated_ustx, u128::from_be_bytes([1; 16])); + assert_eq!(op.delegate_to, StacksAddress::new(22, Hash160([2u8; 20]))); + assert_eq!(&op.reward_addr, &expected_reward_addr); + assert_eq!(op.until_burn_height, Some(u64::from_be_bytes([1; 8]))); + } else { + panic!("EXPECTED to parse a delegate stx op"); + } +} From e887818b82de03398ca17ae5e78b9411fa44b04b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Nov 2022 
18:32:57 -0500 Subject: [PATCH 110/116] fix: use the right function name --- src/chainstate/burn/db/sortdb.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 05b73c8d1e..5888b5aa9e 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -73,8 +73,8 @@ use crate::util_lib::db::tx_busy_handler; use crate::util_lib::db::DBTx; use crate::util_lib::db::Error as db_error; use crate::util_lib::db::{ - db_mkdirs, query_count, query_row, query_row_columns, query_row_panic, query_rows, sql_pragma, - u64_opt_to_sql, u64_to_sql, DBConn, FromColumn, FromRow, IndexDBConn, IndexDBTx, + db_mkdirs, opt_u64_to_sql, query_count, query_row, query_row_columns, query_row_panic, + query_rows, sql_pragma, u64_to_sql, DBConn, FromColumn, FromRow, IndexDBConn, IndexDBTx, }; use clarity::vm::ast::ASTRules; use clarity::vm::representations::{ClarityName, ContractName}; @@ -4410,7 +4410,7 @@ impl<'a> SortitionHandleTx<'a> { &op.delegate_to.to_string(), &serde_json::to_string(&op.reward_addr).unwrap(), &op.delegated_ustx.to_string(), - &u64_opt_to_sql(op.until_burn_height)?, + &opt_u64_to_sql(op.until_burn_height)?, ]; self.execute("REPLACE INTO delegate_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, delegate_to, reward_addr, delegated_ustx, until_burn_height) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)", args)?; From 766627a1561a90652219ea3fcbd0191226555a69 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Nov 2022 18:33:09 -0500 Subject: [PATCH 111/116] chore: API sync --- src/chainstate/coordinator/tests.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 518b57bbcc..398d4b47e2 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -1355,6 +1355,7 @@ fn missed_block_commits_2_1() { 0, false, 
last_input.as_ref().unwrap().clone(), + None, ); // NOTE: intended for block block_height - 2 last_input = Some(( @@ -1409,6 +1410,7 @@ fn missed_block_commits_2_1() { 0, false, last_input.as_ref().unwrap().clone(), + None, ) }; @@ -1422,6 +1424,7 @@ fn missed_block_commits_2_1() { if ix % (MINING_COMMITMENT_WINDOW as usize) == 3 { // produce an empty block! produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, vec![], @@ -1438,6 +1441,7 @@ fn missed_block_commits_2_1() { }, )); produce_burn_block_do_not_set_height( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -1688,6 +1692,7 @@ fn late_block_commits_2_1() { 0, false, last_input.as_ref().unwrap().clone(), + None, ); // NOTE: intended for block block_height - 3 last_input = Some(( @@ -1742,6 +1747,7 @@ fn late_block_commits_2_1() { 0, false, last_input.as_ref().unwrap().clone(), + None, ) }; @@ -1755,6 +1761,7 @@ fn late_block_commits_2_1() { if ix % (MINING_COMMITMENT_WINDOW as usize) == 3 { // produce an empty block! 
produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, vec![], @@ -1771,6 +1778,7 @@ fn late_block_commits_2_1() { }, )); produce_burn_block_do_not_set_height( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -3498,6 +3506,7 @@ fn test_delegate_stx_btc_ops() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -4167,6 +4176,7 @@ fn test_epoch_switch_pox_contract_instantiation() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, @@ -4495,6 +4505,7 @@ fn test_epoch_verify_active_pox_contract() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( + &b, &mut burnchain, &burnchain_tip.block_hash, ops, From d9728bff738fda230401b1b8f65cb7d9b39d6b1d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Nov 2022 18:33:18 -0500 Subject: [PATCH 112/116] chore: API sync --- src/chainstate/stacks/db/blocks.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 895cb2ed20..791dd98555 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -11823,7 +11823,7 @@ pub mod test { delegated_ustx: ((tenure_id + 1) * 1000) as u128, until_burn_height: None, // to be filled in - txid: Txid_from_test_data( + txid: Txid::from_test_data( tenure_id as u64, 2, &BurnchainHeaderHash([tenure_id as u8; 32]), From 67523618b799529553a41792c4797450229812f1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Nov 2022 18:33:27 -0500 Subject: [PATCH 113/116] chore: choose consistent name for this function --- src/util_lib/db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util_lib/db.rs b/src/util_lib/db.rs index 031fce0ed2..f88441a132 100644 --- a/src/util_lib/db.rs +++ b/src/util_lib/db.rs @@ -283,7 +283,7 @@ 
pub fn u64_to_sql(x: u64) -> Result { Ok(x as i64) } -pub fn u64_opt_to_sql(x: Option) -> Result, Error> { +pub fn opt_u64_to_sql(x: Option) -> Result, Error> { match x { Some(num) => { if num > (i64::MAX as u64) { From 32d37994aa74dd3c43b9f4f4f8ef12786a9308f3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Nov 2022 21:59:38 -0500 Subject: [PATCH 114/116] fix: only instantiate indexes if we need to --- src/burnchains/db.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs index 3b2a66fc04..60a79bbb4f 100644 --- a/src/burnchains/db.rs +++ b/src/burnchains/db.rs @@ -288,6 +288,7 @@ CREATE TABLE db_config(version TEXT NOT NULL); INSERT INTO affirmation_maps(affirmation_id,weight,affirmation_map) VALUES (0,0,""); "#; +const LAST_BURNCHAIN_DB_INDEX: &'static str = "index_burnchain_db_txid"; const BURNCHAIN_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_burnchain_db_block_headers_height_hash ON burnchain_db_block_headers(block_height DESC, block_hash ASC);", "CREATE INDEX IF NOT EXISTS index_burnchain_db_block_hash ON burnchain_db_block_ops(block_hash);", @@ -917,12 +918,19 @@ impl<'a> BurnchainDBTransaction<'a> { impl BurnchainDB { fn add_indexes(&mut self) -> Result<(), BurnchainError> { - // TODO: only do this if the DB didn't already have them - let db_tx = self.tx_begin()?; - for index in BURNCHAIN_DB_INDEXES.iter() { - db_tx.sql_tx.execute_batch(index)?; + let exists: i64 = query_row( + self.conn(), + "SELECT 1 FROM sqlite_master WHERE type = 'index' AND name = ?1", + &[LAST_BURNCHAIN_DB_INDEX], + )? 
+ .unwrap_or(0); + if exists == 0 { + let db_tx = self.tx_begin()?; + for index in BURNCHAIN_DB_INDEXES.iter() { + db_tx.sql_tx.execute_batch(index)?; + } + db_tx.commit()?; } - db_tx.commit()?; Ok(()) } From 3b93a754d9dadc922af206918b002ba5e863efe7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Nov 2022 21:59:52 -0500 Subject: [PATCH 115/116] fix: new in 2.1: a reward cycle can be home to at most one anchor block (something this test didn't account for) --- src/chainstate/coordinator/tests.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 398d4b47e2..d09a3075e8 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -3520,8 +3520,19 @@ fn test_delegate_stx_btc_ops() { started_first_reward_cycle = true; // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); + let bhh_opt = ic.get_last_anchor_block_hash().unwrap(); + if new_burnchain_tip.block_height == 31 { + // **New in 2.1** -- a reward cycle can contain an anchor block for at most one other reward cycle. + // Usually, cycle N contains the anchor block for cycle N+1. + // Here, cycle 6 confirms the *same* anchor block as in cycle 5, so it must have + // _no_ anchor block. 
+ assert!( + bhh_opt.is_none(), + "FATAL: a reward cycle contains two anchor blocks" + ); + } else { + anchor_blocks.push(bhh_opt.unwrap()); + } } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); From 69294d5671839d0e14ee9e6cc552ff2777565263 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Nov 2022 22:00:15 -0500 Subject: [PATCH 116/116] fix: burnchain regtest must now start with regtest header --- src/chainstate/stacks/boot/pox_2_tests.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 0f37157764..5d61379bbc 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -3899,7 +3899,10 @@ fn stack_aggregation_increase() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let mut burnchain = Burnchain::default_unittest(0, &BurnchainHeaderHash::zero()); + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); burnchain.pox_constants.reward_cycle_length = 5; burnchain.pox_constants.prepare_length = 2; burnchain.pox_constants.anchor_threshold = 1;