diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs
index dae043eeb2b..7289a958af3 100644
--- a/chain/chain/src/chain.rs
+++ b/chain/chain/src/chain.rs
@@ -91,7 +91,12 @@ pub const NUM_ORPHAN_ANCESTORS_CHECK: u64 = 3;
 // It should almost never be hit
 const MAX_ORPHAN_MISSING_CHUNKS: usize = 5;
 
+/// 10000 years in seconds. Big constant for sandbox to allow time traveling.
+#[cfg(feature = "sandbox")]
+const ACCEPTABLE_TIME_DIFFERENCE: i64 = 60 * 60 * 24 * 365 * 10000;
+
 /// Refuse blocks more than this many block intervals in the future (as in bitcoin).
+#[cfg(not(feature = "sandbox"))]
 const ACCEPTABLE_TIME_DIFFERENCE: i64 = 12 * 10;
 
 /// Over this block height delta in advance if we are not chunk producer - route tx to upcoming validators.
@@ -2233,7 +2238,7 @@ impl Chain {
         blocks_catch_up_state: &mut BlocksCatchUpState,
         block_catch_up_scheduler: &dyn Fn(BlockCatchUpRequest),
     ) -> Result<(), Error> {
-        debug!(target:"catchup", "catch up blocks: pending blocks: {:?}, processed {:?}, scheduled: {:?}, done: {:?}",
+        debug!(target:"catchup", "catch up blocks: pending blocks: {:?}, processed {:?}, scheduled: {:?}, done: {:?}",
             blocks_catch_up_state.pending_blocks, blocks_catch_up_state.processed_blocks.keys().collect::<Vec<_>>(),
             blocks_catch_up_state.scheduled_blocks.keys().collect::<Vec<_>>(), blocks_catch_up_state.done_blocks.len());
         for (queued_block, (saved_store_update, results)) in
diff --git a/chain/chain/src/tests/simple_chain.rs b/chain/chain/src/tests/simple_chain.rs
index cbb8dc5dfae..b1b23acbde0 100644
--- a/chain/chain/src/tests/simple_chain.rs
+++ b/chain/chain/src/tests/simple_chain.rs
@@ -116,6 +116,7 @@ fn build_chain_with_orhpans() {
             &*signer,
             *last_block.header().next_bp_hash(),
             CryptoHash::default(),
+            None,
         );
         assert_eq!(chain.process_block_test(&None, block).unwrap_err().kind(), ErrorKind::Orphan);
         assert_eq!(
diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs
index c99abbda773..56b26f997eb 100644
--- a/chain/client/src/client.rs
+++ b/chain/client/src/client.rs
@@ -36,7 +36,7 @@ use near_primitives::transaction::SignedTransaction;
 use near_primitives::types::chunk_extra::ChunkExtra;
 use near_primitives::types::{AccountId, ApprovalStake, BlockHeight, EpochId, NumBlocks, ShardId};
 use near_primitives::unwrap_or_return;
-use near_primitives::utils::{to_timestamp, MaybeValidated};
+use near_primitives::utils::MaybeValidated;
 use near_primitives::validator_signer::ValidatorSigner;
 
 use crate::chunks_delay_tracker::ChunksDelayTracker;
@@ -69,6 +69,10 @@ pub struct Client {
     #[cfg(feature = "test_features")]
    pub adv_produce_blocks_only_valid: bool,
 
+    /// Accrued delta height from fast forwarding, used to calculate the fast-forwarded timestamp for each block.
+    #[cfg(feature = "sandbox")]
+    pub(crate) accrued_fastforward_delta: near_primitives::types::BlockHeightDelta,
+
     pub config: ClientConfig,
     pub sync_status: SyncStatus,
     pub chain: Chain,
@@ -200,6 +204,8 @@ impl Client {
             adv_produce_blocks: false,
             #[cfg(feature = "test_features")]
             adv_produce_blocks_only_valid: false,
+            #[cfg(feature = "sandbox")]
+            accrued_fastforward_delta: 0,
             config,
             sync_status,
             chain,
@@ -473,6 +479,11 @@ impl Client {
             prev_next_bp_hash
         };
 
+        #[cfg(feature = "sandbox")]
+        let timestamp_override = Some(Clock::utc() + self.sandbox_delta_time());
+        #[cfg(not(feature = "sandbox"))]
+        let timestamp_override = None;
+
         // Get block extra from previous block.
         let mut block_merkle_tree =
             self.chain.mut_store().get_block_merkle_tree(&prev_hash)?.clone();
@@ -542,12 +553,13 @@ impl Client {
             &*validator_signer,
             next_bp_hash,
             block_merkle_root,
+            timestamp_override,
         );
 
         // Update latest known even before returning block out, to prevent race conditions.
         self.chain.mut_store().save_latest_known(LatestKnown {
             height: next_height,
-            seen: to_timestamp(Clock::utc()),
+            seen: block.header().raw_timestamp(),
         })?;
 
         metrics::BLOCK_PRODUCED_TOTAL.inc();
@@ -988,6 +1000,22 @@ impl Client {
         Ok(())
     }
 
+    /// Gets the timestamp delta by which the sandbox clock has been advanced once it has been fast-forwarded.
+    #[cfg(feature = "sandbox")]
+    pub fn sandbox_delta_time(&self) -> chrono::Duration {
+        let avg_block_prod_time = (self.config.min_block_production_delay.as_nanos()
+            + self.config.max_block_production_delay.as_nanos())
+            / 2;
+        let ns = (self.accrued_fastforward_delta as u128 * avg_block_prod_time).try_into().expect(
+            &format!(
+                "Too high of a delta_height {} to convert into u64",
+                self.accrued_fastforward_delta
+            ),
+        );
+
+        chrono::Duration::nanoseconds(ns)
+    }
+
     pub fn send_approval(
         &mut self,
         parent_hash: &CryptoHash,
diff --git a/chain/client/src/client_actor.rs b/chain/client/src/client_actor.rs
index 668e8e49da9..fb3e5ef090d 100644
--- a/chain/client/src/client_actor.rs
+++ b/chain/client/src/client_actor.rs
@@ -101,7 +101,7 @@ pub struct ClientActor {
     state_parts_client_arbiter: Arbiter,
 
     #[cfg(feature = "sandbox")]
-    fastforward_delta: Option<near_primitives::types::BlockHeightDelta>,
+    fastforward_delta: near_primitives::types::BlockHeightDelta,
 
     /// Synchronization measure to allow graceful shutdown.
     /// Informs the system when a ClientActor gets dropped.
@@ -204,7 +204,7 @@ impl ClientActor {
             state_parts_client_arbiter: state_parts_arbiter,
 
             #[cfg(feature = "sandbox")]
-            fastforward_delta: None,
+            fastforward_delta: 0,
             _shutdown_signal: shutdown_signal,
         })
     }
@@ -384,9 +384,22 @@ impl ClientActor {
                     )
                 }
                 near_network_primitives::types::NetworkSandboxMessage::SandboxFastForward(delta_height) => {
-                    self.fastforward_delta = Some(delta_height);
+                    if self.fastforward_delta > 0 {
+                        return NetworkClientResponses::SandboxResult(
+                            near_network_primitives::types::SandboxResponse::SandboxFastForwardFailed(
+                                "Consecutive fast_forward requests cannot be made while one is already in progress.".to_string()));
+                    }
+
+                    self.fastforward_delta = delta_height;
                     NetworkClientResponses::NoResponse
                 }
+                near_network_primitives::types::NetworkSandboxMessage::SandboxFastForwardStatus => {
+                    NetworkClientResponses::SandboxResult(
+                        near_network_primitives::types::SandboxResponse::SandboxFastForwardFinished(
+                            self.fastforward_delta == 0,
+                        ),
+                    )
+                }
             };
         }
         NetworkClientMessages::Transaction { transaction, is_forwarded, check_only } => {
@@ -890,6 +903,84 @@ impl ClientActor {
         }
     }
 
+    /// Process the sandbox fast-forward request. If the requested change in block height crosses
+    /// an epoch boundary, we fast forward to just before the boundary, produce a few blocks to
+    /// get into the new epoch, and then continue with the remaining delta.
+    #[cfg(feature = "sandbox")]
+    fn sandbox_process_fast_forward(
+        &mut self,
+        block_height: BlockHeight,
+    ) -> Result<Option<near_chain::types::LatestKnown>, Error> {
+        let mut delta_height = std::mem::replace(&mut self.fastforward_delta, 0);
+        if delta_height == 0 {
+            return Ok(None);
+        }
+
+        let epoch_length = self.client.config.epoch_length;
+        if epoch_length <= 3 {
+            return Err(Error::Other(
+                "Unsupported: fast_forward with an epoch length of 3 or less".to_string(),
+            ));
+        }
+
+        // Check if we are at an epoch boundary. If we are, do not fast forward until the new
+        // epoch has started. The fast_forward count is decremented by 1 each time a block is
+        // produced during this period of waiting.
+        let block_height_wrt_epoch = block_height % epoch_length;
+        if epoch_length - block_height_wrt_epoch <= 3 || block_height_wrt_epoch == 0 {
+            // wait for doomslug to call into produce block
+            self.fastforward_delta = delta_height;
+            return Ok(None);
+        }
+
+        let delta_height = if block_height_wrt_epoch + delta_height >= epoch_length {
+            // Fast forward to just before the epoch boundary so that epoch_manager
+            // handles the epoch_height updates as normal. `- 3` since this is being
+            // done 3 blocks before the epoch ends.
+            let right_before_epoch_update = epoch_length - block_height_wrt_epoch - 3;
+
+            delta_height -= right_before_epoch_update;
+            self.fastforward_delta = delta_height;
+            right_before_epoch_update
+        } else {
+            delta_height
+        };
+
+        self.client.accrued_fastforward_delta += delta_height;
+        let delta_time = self.client.sandbox_delta_time();
+        let new_latest_known = near_chain::types::LatestKnown {
+            height: block_height + delta_height,
+            seen: near_primitives::utils::to_timestamp(Clock::utc() + delta_time),
+        };
+
+        Ok(Some(new_latest_known))
+    }
+
+    fn pre_block_production(&mut self) -> Result<(), Error> {
+        #[cfg(feature = "sandbox")]
+        {
+            let latest_known = self.client.chain.mut_store().get_latest_known()?;
+            if let Some(new_latest_known) =
+                self.sandbox_process_fast_forward(latest_known.height)?
+            {
+                self.client.chain.mut_store().save_latest_known(new_latest_known.clone())?;
+                self.client.sandbox_update_tip(new_latest_known.height)?;
+            }
+        }
+        Ok(())
+    }
+
+    fn post_block_production(&mut self) {
+        #[cfg(feature = "sandbox")]
+        if self.fastforward_delta > 0 {
+            // Decrease the delta_height by 1 since we've produced a single block. This ensures
+            // that we advance the right number of blocks when fast forwarding, since fast
+            // forwarding triggers regular block production when stepping across epoch boundaries.
+            self.fastforward_delta -= 1;
+        }
+    }
+
     /// Retrieves latest height, and checks if must produce next block.
     /// Otherwise wait for block arrival or suggest to skip after timeout.
     fn handle_block_production(&mut self) -> Result<(), Error> {
@@ -900,23 +991,10 @@ impl ClientActor {
         let _ = self.client.check_and_update_doomslug_tip();
+        self.pre_block_production()?;
         let head = self.client.chain.head()?;
         let latest_known = self.client.chain.mut_store().get_latest_known()?;
 
-        #[cfg(feature = "sandbox")]
-        let latest_known = if let Some(delta_height) = self.fastforward_delta.take() {
-            let new_latest_known = near_chain::types::LatestKnown {
-                height: latest_known.height + delta_height,
-                seen: near_primitives::utils::to_timestamp(Clock::utc()),
-            };
-
-            self.client.chain.mut_store().save_latest_known(new_latest_known.clone())?;
-            self.client.sandbox_update_tip(new_latest_known.height)?;
-            new_latest_known
-        } else {
-            latest_known
-        };
-
         assert!(
             head.height <= latest_known.height,
             "Latest known height is invalid {} vs {}",
@@ -948,6 +1026,8 @@ impl ClientActor {
             if let Err(err) = self.produce_block(height) {
                 // If there is an error, report it and let it retry on the next loop step.
                 error!(target: "client", "Block production failed: {}", err);
+            } else {
+                self.post_block_production();
             }
         }
     }
diff --git a/chain/client/src/sync.rs b/chain/client/src/sync.rs
index b6d0456fc16..7626d1212e4 100644
--- a/chain/client/src/sync.rs
+++ b/chain/client/src/sync.rs
@@ -1502,6 +1502,7 @@ mod test {
                 &*signers[3],
                 *last_block.header().next_bp_hash(),
                 block_merkle_tree.root(),
+                None,
             );
             block_merkle_tree.insert(*block.hash());
 
diff --git a/chain/client/src/test_utils.rs b/chain/client/src/test_utils.rs
index 82983b40dc6..7f78d016582 100644
--- a/chain/client/src/test_utils.rs
+++ b/chain/client/src/test_utils.rs
@@ -66,6 +66,11 @@ use near_primitives::utils::MaybeValidated;
 
 pub type PeerManagerMock = Mocker<PeerManagerActor>;
 
+/// Min block production time used in tests.
+pub const MIN_BLOCK_PROD_TIME: Duration = Duration::from_millis(100);
+/// Max block production time used in tests.
+pub const MAX_BLOCK_PROD_TIME: Duration = Duration::from_millis(200);
+
 const TEST_SEED: RngSeed = [3; 32];
 
 /// Sets up ClientActor and ViewClientActor viewing the same store/runtime.
pub fn setup( @@ -285,8 +290,8 @@ pub fn setup_mock_with_validity_period_and_no_epoch_sync( 5, account_id, skip_sync_wait, - 100, - 200, + MIN_BLOCK_PROD_TIME.as_millis() as u64, + MAX_BLOCK_PROD_TIME.as_millis() as u64, enable_doomslug, false, false, @@ -1669,6 +1674,7 @@ pub fn create_chunk( &*client.validator_signer.as_ref().unwrap().clone(), *last_block.header().next_bp_hash(), block_merkle_tree.root(), + None, ); (chunk, merkle_paths, receipts, block) } diff --git a/chain/client/src/tests/query_client.rs b/chain/client/src/tests/query_client.rs index 3e650ad49f0..1078308849f 100644 --- a/chain/client/src/tests/query_client.rs +++ b/chain/client/src/tests/query_client.rs @@ -89,6 +89,7 @@ fn query_status_not_crash() { &signer, block.header.next_bp_hash, block_merkle_tree.root(), + None, ); next_block.mut_header().get_mut().inner_lite.timestamp = to_timestamp(next_block.header().timestamp() + chrono::Duration::seconds(60)); diff --git a/chain/jsonrpc/src/lib.rs b/chain/jsonrpc/src/lib.rs index 9c00323a584..a917e76e758 100644 --- a/chain/jsonrpc/src/lib.rs +++ b/chain/jsonrpc/src/lib.rs @@ -1180,6 +1180,8 @@ impl JsonRpcHandler { near_jsonrpc_primitives::types::sandbox::RpcSandboxFastForwardResponse, near_jsonrpc_primitives::types::sandbox::RpcSandboxFastForwardError, > { + use near_network_primitives::types::SandboxResponse; + self.client_addr .send(NetworkClientMessages::Sandbox( near_network_primitives::types::NetworkSandboxMessage::SandboxFastForward( @@ -1187,6 +1189,36 @@ impl JsonRpcHandler { ), )) .await?; + + // Hard limit the request to timeout at an hour, since fast forwarding can take a while, + // where we can leave it to the rpc clients to set their own timeouts if necessary. + timeout(Duration::from_secs(60 * 60), async { + loop { + let fast_forward_finished = self + .client_addr + .send(NetworkClientMessages::Sandbox( + near_network_primitives::types::NetworkSandboxMessage::SandboxFastForwardStatus {}, + )) + .await; + + match fast_forward_finished { + Ok(NetworkClientResponses::SandboxResult(SandboxResponse::SandboxFastForwardFinished(true))) => break, + Ok(NetworkClientResponses::SandboxResult(SandboxResponse::SandboxFastForwardFailed(err))) => return Err(err), + _ => (), + } + + let _ = sleep(self.polling_config.polling_interval).await; + } + Ok(()) + }) + .await + .map_err(|_| near_jsonrpc_primitives::types::sandbox::RpcSandboxFastForwardError::InternalError { + error_message: "sandbox failed to fast forward within reasonable time of an hour".to_string() + })? 
+            .map_err(|err| near_jsonrpc_primitives::types::sandbox::RpcSandboxFastForwardError::InternalError {
+                error_message: format!("sandbox failed to fast forward due to: {:?}", err),
+            })?;
+
         Ok(near_jsonrpc_primitives::types::sandbox::RpcSandboxFastForwardResponse {})
     }
 }
diff --git a/chain/network-primitives/src/types.rs b/chain/network-primitives/src/types.rs
index 07b5933c32b..f31dbf496fb 100644
--- a/chain/network-primitives/src/types.rs
+++ b/chain/network-primitives/src/types.rs
@@ -278,12 +278,15 @@ pub enum NetworkSandboxMessage {
     SandboxPatchState(Vec<StateRecord>),
     SandboxPatchStateStatus,
     SandboxFastForward(near_primitives::types::BlockHeightDelta),
+    SandboxFastForwardStatus,
 }
 
 #[cfg(feature = "sandbox")]
 #[derive(Eq, PartialEq, Debug)]
 pub enum SandboxResponse {
     SandboxPatchStateFinished(bool),
+    SandboxFastForwardFinished(bool),
+    SandboxFastForwardFailed(String),
 }
 
 #[derive(actix::Message, AsStaticStr)]
diff --git a/core/primitives/benches/serialization.rs b/core/primitives/benches/serialization.rs
index 303f1af3c16..fdf4be041b9 100644
--- a/core/primitives/benches/serialization.rs
+++ b/core/primitives/benches/serialization.rs
@@ -68,6 +68,7 @@ fn create_block() -> Block {
         &signer,
         CryptoHash::default(),
         CryptoHash::default(),
+        None,
     )
 }
 
diff --git a/core/primitives/src/block.rs b/core/primitives/src/block.rs
index ba1bea718a9..ff7eecfe32d 100644
--- a/core/primitives/src/block.rs
+++ b/core/primitives/src/block.rs
@@ -214,6 +214,7 @@ impl Block {
         signer: &dyn ValidatorSigner,
         next_bp_hash: CryptoHash,
         block_merkle_root: CryptoHash,
+        timestamp_override: Option<DateTime<Utc>>,
     ) -> Self {
         // Collect aggregate of validators and gas usage/limits from chunks.
         let mut validator_proposals = vec![];
@@ -243,7 +244,7 @@ impl Block {
         );
         let new_total_supply = prev.total_supply() + minted_amount.unwrap_or(0) - balance_burnt;
 
-        let now = to_timestamp(Clock::utc());
+        let now = to_timestamp(timestamp_override.unwrap_or_else(Clock::utc));
         let time = if now <= prev.raw_timestamp() { prev.raw_timestamp() + 1 } else { now };
 
         let (vrf_value, vrf_proof) = signer.compute_vrf_with_proof(prev.random_value().as_ref());
diff --git a/core/primitives/src/test_utils.rs b/core/primitives/src/test_utils.rs
index c68f9cce75c..334c96ca816 100644
--- a/core/primitives/src/test_utils.rs
+++ b/core/primitives/src/test_utils.rs
@@ -438,6 +438,7 @@ impl Block {
             signer,
             next_bp_hash,
             block_merkle_root,
+            None,
         )
     }
 }
diff --git a/integration-tests/src/tests/client/challenges.rs b/integration-tests/src/tests/client/challenges.rs
index 36ea9bfe9f9..b0cba98d264 100644
--- a/integration-tests/src/tests/client/challenges.rs
+++ b/integration-tests/src/tests/client/challenges.rs
@@ -69,6 +69,7 @@ fn test_verify_block_double_sign_challenge() {
         &signer,
         *b1.header().next_bp_hash(),
         block_merkle_tree.root(),
+        None,
     );
     let epoch_id = b1.header().epoch_id().clone();
     let valid_challenge = Challenge::produce(
@@ -369,6 +370,7 @@ fn test_verify_chunk_invalid_state_challenge() {
         &validator_signer,
         *last_block.header().next_bp_hash(),
         block_merkle_tree.root(),
+        None,
     );
     let challenge_body = {
diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs
index ba20ac4287b..97fe1d827c2 100644
--- a/integration-tests/src/tests/client/process_blocks.rs
+++ b/integration-tests/src/tests/client/process_blocks.rs
@@ -378,6 +378,7 @@ fn receive_network_block() {
                     &signer,
                     last_block.header.next_bp_hash,
                     block_merkle_tree.root(),
+                    None,
                 );
                 client.do_send(NetworkClientMessages::Block(block,
                     PeerInfo::random().id, false));
                 future::ready(())
@@ -454,6 +455,7 @@ fn produce_block_with_approvals() {
                     &signer1,
                     last_block.header.next_bp_hash,
                     block_merkle_tree.root(),
+                    None,
                 );
                 client.do_send(NetworkClientMessages::Block(
                     block.clone(),
@@ -643,6 +645,7 @@ fn invalid_blocks_common(is_requested: bool) {
                     &signer,
                     last_block.header.next_bp_hash,
                     block_merkle_tree.root(),
+                    None,
                 );
                 // Send block with invalid chunk mask
                 let mut block = valid_block.clone();
diff --git a/integration-tests/src/tests/client/sandbox.rs b/integration-tests/src/tests/client/sandbox.rs
index a13d6d52708..d9e8a0dbc59 100644
--- a/integration-tests/src/tests/client/sandbox.rs
+++ b/integration-tests/src/tests/client/sandbox.rs
@@ -1,19 +1,10 @@
 use std::path::Path;
-use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
 
-use actix::System;
-
-use near_actix_test_utils::run_actix;
 use near_chain::{ChainGenesis, Provenance, RuntimeAdapter};
 use near_chain_configs::Genesis;
-use near_client::test_utils::{setup_mock, TestEnv};
+use near_client::test_utils::TestEnv;
 use near_crypto::{InMemorySigner, KeyType};
-use near_logger_utils::init_test_logger;
-use near_network::types::{
-    NetworkClientMessages, NetworkRequests, NetworkResponses, PeerManagerMessageResponse,
-};
-use near_network_primitives::types::NetworkSandboxMessage;
 use near_primitives::account::Account;
 use near_primitives::serialize::{from_base64, to_base64};
 use near_primitives::state_record::StateRecord;
@@ -116,39 +107,3 @@ fn test_patch_account() {
     let test1_after = env.query_account("test1".parse().unwrap());
     assert_eq!(test1_after.amount, 10);
 }
-
-#[test]
-fn test_fast_forward() {
-    init_test_logger();
-    run_actix(async {
-        let count = Arc::new(AtomicUsize::new(0));
-        // Produce 20 blocks
-        let (client, _view_client) = setup_mock(
-            vec!["test".parse().unwrap()],
-            "test".parse().unwrap(),
-            true,
-            false,
-            Box::new(move |msg, _ctx, _| {
-                if let NetworkRequests::Block { block } = msg.as_network_requests_ref() {
-                    let height = block.header().height();
-                    count.fetch_add(1, Ordering::Relaxed);
-                    if count.load(Ordering::Relaxed) >= 20 {
-                        assert!(
-                            height >= 10000,
-                            "Was not able to fast forward. Current height: {}",
-                            height
-                        );
-                        System::current().stop();
-                    }
-                }
-                PeerManagerMessageResponse::NetworkResponses(NetworkResponses::NoResponse)
-            }),
-        );
-
-        // Fast forward by 10,000 blocks:
-        client.do_send(NetworkClientMessages::Sandbox(NetworkSandboxMessage::SandboxFastForward(
-            10000,
-        )));
-        near_network::test_utils::wait_or_panic(5000);
-    });
-}
diff --git a/integration-tests/src/tests/nearcore/sync_nodes.rs b/integration-tests/src/tests/nearcore/sync_nodes.rs
index b80383f1550..d7f016699cb 100644
--- a/integration-tests/src/tests/nearcore/sync_nodes.rs
+++ b/integration-tests/src/tests/nearcore/sync_nodes.rs
@@ -93,6 +93,7 @@ fn add_blocks(
         signer,
         next_bp_hash,
         block_merkle_tree.root(),
+        None,
     );
     block_merkle_tree.insert(*block.hash());
     let _ = client.do_send(NetworkClientMessages::Block(
diff --git a/nightly/sandbox.txt b/nightly/sandbox.txt
index d94b8c9ee86..677abae683d 100644
--- a/nightly/sandbox.txt
+++ b/nightly/sandbox.txt
@@ -1,3 +1,4 @@
 # python sandbox node tests
 pytest sandbox/patch_state.py --features sandbox
 pytest sandbox/fast_forward.py --features sandbox
+pytest sandbox/fast_forward_epoch_boundary.py --features sandbox
diff --git a/pytest/tests/sandbox/fast_forward.py b/pytest/tests/sandbox/fast_forward.py
index 570ac8075b8..a4e9cbd796f 100644
--- a/pytest/tests/sandbox/fast_forward.py
+++ b/pytest/tests/sandbox/fast_forward.py
@@ -1,27 +1,85 @@
 #!/usr/bin/env python3
 # test fast fowarding by a specific block height within a sandbox node. This will
-# fail if the block height is not past the forwarded height.
+# fail if the block height is not past the forwarded height. Also we will test
+# for the timestamps and epoch height being adjusted correctly after the block
+# height is changed.
 
-import sys, time
+import datetime
+import sys
 import pathlib
 
 sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))
 
+import utils
 from cluster import start_cluster
-from utils import figure_out_sandbox_binary
 
 # startup a RPC node
-BLOCKS_TO_FASTFORARD = 10000
-CONFIG = figure_out_sandbox_binary()
-nodes = start_cluster(1, 0, 1, CONFIG, [["epoch_length", 10]], {})
+MIN_BLOCK_PROD_TIME = 1  # seconds
+MAX_BLOCK_PROD_TIME = 2  # seconds
+EPOCH_LENGTH = 100
+BLOCKS_TO_FASTFORWARD = 4 * EPOCH_LENGTH
+CONFIG = utils.figure_out_sandbox_binary()
+CONFIG.update({
+    "consensus": {
+        "min_block_production_delay": {
+            "secs": MIN_BLOCK_PROD_TIME,
+            "nanos": 0,
+        },
+        "max_block_production_delay": {
+            "secs": MAX_BLOCK_PROD_TIME,
+            "nanos": 0,
+        },
+    }
+})
+
+nodes = start_cluster(1, 0, 1, CONFIG, [["epoch_length", EPOCH_LENGTH]], {})
+sync_info = nodes[0].get_status()['sync_info']
+pre_forward_block_hash = sync_info['latest_block_hash']
 
 # request to fast forward
-nodes[0].json_rpc('sandbox_fast_forward', {
-    "delta_height": BLOCKS_TO_FASTFORARD,
-})
+nodes[0].json_rpc('sandbox_fast_forward',
+                  {"delta_height": BLOCKS_TO_FASTFORWARD},
+                  timeout=60)
 
 # wait a little for it to fast forward
-time.sleep(3)
+# if this call times out, then the fast_forward failed somewhere
+utils.wait_for_blocks(nodes[0], target=BLOCKS_TO_FASTFORWARD + 10, timeout=10)
+
+# Assert that the fast-forwarded timestamp lies between the min and max bounds:
+sync_info = nodes[0].get_status()['sync_info']
+earliest = datetime.datetime.strptime(sync_info['earliest_block_time'][:-4],
+                                      '%Y-%m-%dT%H:%M:%S.%f')
+latest = datetime.datetime.strptime(sync_info['latest_block_time'][:-4],
+                                    '%Y-%m-%dT%H:%M:%S.%f')
+
+min_forwarded_secs = datetime.timedelta(
+    0, BLOCKS_TO_FASTFORWARD * MIN_BLOCK_PROD_TIME)
+max_forwarded_secs = datetime.timedelta(
+    0, BLOCKS_TO_FASTFORWARD * MAX_BLOCK_PROD_TIME)
+min_forwarded_time = earliest + min_forwarded_secs
+max_forwarded_time = earliest + max_forwarded_secs
+
+assert min_forwarded_time < latest < max_forwarded_time
+
+# Check to see that the epoch height has been updated correctly:
+epoch_height = nodes[0].get_validators()['result']['epoch_height']
+assert epoch_height == BLOCKS_TO_FASTFORWARD // EPOCH_LENGTH
+
+# Check that queries don't fail after fast forwarding:
+resp = nodes[0].json_rpc("block", {"finality": "optimistic"})
+assert resp['result']['chunks'][0]['height_created'] > BLOCKS_TO_FASTFORWARD
+resp = nodes[0].json_rpc("block", {"finality": "final"})
+assert resp['result']['chunks'][0]['height_created'] > BLOCKS_TO_FASTFORWARD
+
+# Not necessarily a requirement, but current implementation should be able to retrieve
+# one of the blocks before fast-forwarding:
+resp = nodes[0].json_rpc("block", {"block_id": pre_forward_block_hash})
+assert resp['result']['chunks'][0]['height_created'] < BLOCKS_TO_FASTFORWARD
 
-# Assert at the end that the node is past the amounts of blocks we specified
-assert nodes[0].get_latest_block().height > BLOCKS_TO_FASTFORARD
+# do one more fast forward request just so we make sure consecutive requests
+# don't crash anything on the node
+nodes[0].json_rpc('sandbox_fast_forward',
+                  {"delta_height": BLOCKS_TO_FASTFORWARD},
+                  timeout=60)
+resp = nodes[0].json_rpc("block", {"finality": "optimistic"})
+assert resp['result']['chunks'][0]['height_created'] > 2 * BLOCKS_TO_FASTFORWARD
diff --git a/pytest/tests/sandbox/fast_forward_epoch_boundary.py b/pytest/tests/sandbox/fast_forward_epoch_boundary.py
new file mode 100644
index 00000000000..5d0e87c409a
--- /dev/null
+++ b/pytest/tests/sandbox/fast_forward_epoch_boundary.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+# test fast forwarding on epoch boundaries just so we can see that epoch heights
+# are being updated accordingly once we get near the boundary.
+
+import datetime
+import sys
+import pathlib
+
+sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))
+
+import utils
+from cluster import start_cluster
+
+# startup a RPC node
+MIN_BLOCK_PROD_TIME = 1  # seconds
+MAX_BLOCK_PROD_TIME = 2  # seconds
+EPOCH_LENGTH = 100
+CONFIG = utils.figure_out_sandbox_binary()
+CONFIG.update({
+    "consensus": {
+        "min_block_production_delay": {
+            "secs": MIN_BLOCK_PROD_TIME,
+            "nanos": 0,
+        },
+        "max_block_production_delay": {
+            "secs": MAX_BLOCK_PROD_TIME,
+            "nanos": 0,
+        },
+    }
+})
+
+nodes = start_cluster(1, 0, 1, CONFIG, [["epoch_length", EPOCH_LENGTH]], {})
+
+# start at block_height = 10
+utils.wait_for_blocks(nodes[0], target=10)
+# fast forward to about block_height=190 and then test for boundaries
+nodes[0].json_rpc('sandbox_fast_forward', {"delta_height": 180}, timeout=60)
+for i in range(20):
+    utils.wait_for_blocks(nodes[0], target=190 + i)
+    block_height = nodes[0].get_latest_block().height
+    epoch_height = nodes[0].get_validators()['result']['epoch_height']
+    assert epoch_height == (2 if block_height > 200 else 1)
+
+# check that we still have correct epoch heights after consecutive fast forwards:
+utils.wait_for_blocks(nodes[0], target=220)
+nodes[0].json_rpc('sandbox_fast_forward', {"delta_height": 70}, timeout=60)
+for i in range(20):
+    utils.wait_for_blocks(nodes[0], target=290 + i)
+    block_height = nodes[0].get_latest_block().height
+    epoch_height = nodes[0].get_validators()['result']['epoch_height']
+    assert epoch_height == (3 if block_height > 300 else 2)
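
For reviewers who want to exercise this by hand, here is a minimal sketch of driving the new RPC directly, assuming a sandbox-enabled neard serving JSON-RPC on localhost:3030. The `sandbox_fast_forward` method and its `delta_height` parameter come from the handler in this change; the HTTP envelope and the `block` query are standard NEAR JSON-RPC, and the port, URL, and helper names are assumptions for illustration only.

#!/usr/bin/env python3
# Sketch: fast forward a locally running sandbox node and confirm the height moved.
# Assumes a sandbox-enabled neard is serving JSON-RPC on localhost:3030.
import time
import requests

RPC_ADDR = "http://localhost:3030"

def rpc(method, params):
    # Standard NEAR JSON-RPC envelope; the server keeps the fast_forward request
    # open until it finishes (or its one-hour hard limit is hit).
    r = requests.post(RPC_ADDR, json={
        "jsonrpc": "2.0", "id": "dontcare", "method": method, "params": params,
    })
    r.raise_for_status()
    return r.json()

def latest_height():
    return rpc("block", {"finality": "optimistic"})["result"]["header"]["height"]

start_height = latest_height()
rpc("sandbox_fast_forward", {"delta_height": 1000})
time.sleep(2)  # give the node a moment to produce a block past the new height
assert latest_height() >= start_height + 1000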