Broken, #[ignore]d tests #2162

hrxi commented Jan 29, 2024

  • blockchain: can_rebranch_and_revert_chunks (ignored in 27cfb4e).
  • consensus: test_request_component (ignored since its introduction in 502c473).
  • validator: four_validators_can_create_an_epoch (ignored since cbb6fba).
  • validator: validator_can_catch_up (ignored since 79d1377).
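
For reference, ignored tests can still be run explicitly via cargo test -- --ignored; a minimal sketch, filtered to one of the tests above (the -p crate name is an assumption about the workspace layout):

    # Runs only #[ignore]d tests, optionally filtered by name; crate name assumed.
    cargo test -p nimiq-blockchain -- --ignored can_rebranch_and_revert_chunks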

#[test]
#[ignore]
fn can_rebranch_and_revert_chunks() {
    let temp_producer1 = TemporaryBlockProducer::new();
    let temp_producer2 = TemporaryBlockProducer::new_incomplete();

    // Block 1, 1 chunk
    let block1 = temp_producer1.next_block(vec![], false);
    let chunk1 = temp_producer1.get_chunk(KeyNibbles::ROOT, 1);
    let chunk2_start = chunk1.chunk.end_key.clone().unwrap();
    assert_eq!(
        temp_producer2.push_with_chunks(block1, TrieDiff::default(), vec![chunk1]),
        Ok((PushResult::Extended, Ok(ChunksPushResult::Chunks(1, 0))))
    );

    // Block 2b, 1 chunk (to be rebranched)
    let block2b = temp_producer1.next_block_no_push(vec![], true);

    // Block 2a, 1 chunk (to be reverted)
    let block2a = temp_producer1.next_block(vec![], false);
    let chunk2a = temp_producer1.get_chunk(chunk2_start.clone(), 2);

    assert_eq!(
        temp_producer1.push(block2b.clone()),
        Ok(PushResult::Rebranched)
    );
    let chunk2b = temp_producer1.get_chunk(chunk2_start, 3);

    assert_eq!(
        temp_producer2.push_with_chunks(block2a, TrieDiff::default(), vec![chunk2a]),
        Ok((PushResult::Extended, Ok(ChunksPushResult::Chunks(1, 0))))
    );
    assert_eq!(
        temp_producer2.push_with_chunks(block2b, TrieDiff::default(), vec![chunk2b]),
        Ok((PushResult::Rebranched, Ok(ChunksPushResult::Chunks(1, 0))))
    );

    // Done
    assert_eq!(
        temp_producer2
            .blockchain
            .read()
            .get_missing_accounts_range(None),
        None
    );
    assert_eq!(
        temp_producer2
            .blockchain
            .read()
            .state
            .accounts
            .get_root_hash_assert(None),
        temp_producer1
            .blockchain
            .read()
            .state
            .accounts
            .get_root_hash_assert(None)
    );
}

#[ignore]
#[test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
async fn test_request_component() {
    let mut hub = Some(MockHub::default());
    let env = VolatileDatabase::new(20).expect("Could not open a volatile database");

    // Generate genesis block.
    let key = KeyPair::generate(&mut seeded_rng(0));
    let sgn_key = KeyPair::generate(&mut seeded_rng(0));
    let vtn_key = BLSKeyPair::generate(&mut seeded_rng(0));
    let genesis = GenesisBuilder::default()
        .with_genesis_validator(
            Address::from(&key),
            sgn_key.public,
            vtn_key.public_key,
            Address::default(),
            None,
            None,
            false,
        )
        .generate(env)
        .unwrap();

    let mut node1 =
        Node::<MockNetwork>::history_with_genesis_info(1, genesis.clone(), &mut hub, false).await;
    let mut node2 =
        Node::<MockNetwork>::history_with_genesis_info(2, genesis.clone(), &mut hub, false).await;

    let producer1 = BlockProducer::new(signing_key(), voting_key());

    node1.consume();
    node2.consume();

    // Let node1 produce blocks again.
    {
        let prod_blockchain = Arc::clone(&node1.blockchain);
        tokio::spawn(async move {
            loop {
                produce_macro_blocks(&producer1, &prod_blockchain, 1);
                tokio::time::sleep(Duration::from_secs(5)).await;
            }
        });
    }

    let mut connected = false;
    let mut interval = tokio::time::interval(Duration::from_secs(1));
    loop {
        if node1.blockchain.read().block_number() > 200 + Policy::genesis_block_number()
            && !connected
        {
            log::info!("Connecting node2 to node1");
            node2.network.dial_mock(&node1.network);
            connected = true;
        }

        log::info!(
            "Node1: at #{} - {}",
            node1.blockchain.read().block_number(),
            node1.blockchain.read().head_hash()
        );
        log::info!(
            "Node2: at #{} - {}",
            node2.blockchain.read().block_number(),
            node2.blockchain.read().head_hash()
        );

        interval.tick().await;
    }
}

#[test(tokio::test(flavor = "multi_thread"))]
#[ignore]
async fn four_validators_can_create_an_epoch() {
    let env = VolatileDatabase::new(20).expect("Could not open a volatile database");
    let validators =
        build_validators::<Network>(env, &(1u64..=4u64).collect::<Vec<_>>(), &mut None, false)
            .await;
    let blockchain = Arc::clone(&validators.first().unwrap().blockchain);

    tokio::spawn(future::join_all(validators));

    let events = blockchain.read().notifier_as_stream();
    events.take(130).for_each(|_| future::ready(())).await;

    assert!(blockchain.read().block_number() >= 130 + Policy::genesis_block_number());
}

#[ignore]
#[test(tokio::test)]
async fn validator_can_catch_up() {
    // Remove the first block producer in order to trigger a skip block. Never connect it again.
    // Remove the second block producer to trigger another skip block after the first one (which
    // we want someone to catch up to). Never connect it again.
    // The third block producer needs to be disconnected as well and then reconnected, so that it
    // catches up on the second skip block while not having seen the first one, resulting in it
    // producing the first block.
    let hub = MockHub::default();
    let env = VolatileDatabase::new(20).expect("Could not open a volatile database");

    // In total, 8 validators are registered. After 3 validators are taken offline, the remaining
    // 5 should not be able to progress on their own.
    let mut validators = build_validators::<Network>(
        env,
        &(9u64..=16u64).collect::<Vec<_>>(),
        &mut Some(hub),
        false,
    )
    .await;

    // Maintain a collection of the corresponding networks.
    let networks: Vec<Arc<Network>> = validators
        .iter()
        .map(|v| v.consensus.network.clone())
        .collect();

    // Disconnect the block producers for the next 3 skip blocks. Remember the one which is
    // supposed to actually create the block (3rd skip block).
    let (validator, _) = {
        let validator = validator_for_slot(&mut validators, 1, 1);
        validator
            .consensus
            .network
            .disconnect(CloseReason::GoingOffline)
            .await;
        let id1 = validator.validator_slot_band();
        let validator = validator_for_slot(&mut validators, 2, 2);
        validator
            .consensus
            .network
            .disconnect(CloseReason::GoingOffline)
            .await;
        let id2 = validator.validator_slot_band();
        assert_ne!(id2, id1);

        // Ideally, we would remove the validators from the vec so that they do not even execute.
        // However, the implementation still progresses their chains, and since they have
        // registered listeners, they would panic. That is confusing, thus they are allowed to
        // execute (with no validator network connection).
        // validators.retain(|v| {
        //     v.validator_address() != id1 && v.validator_address() != id2
        // });

        let validator = validator_for_slot(&validators, 3, 3);
        validator
            .consensus
            .network
            .disconnect(CloseReason::GoingOffline)
            .await;
        assert_ne!(id1, validator.validator_slot_band());
        assert_ne!(id2, validator.validator_slot_band());
        (validator, validator.consensus.network.clone())
    };

    // assert_eq!(validators.len(), 7);

    let blockchain = validator.blockchain.clone();
    // Listen for blockchain events from the block producer (after two skip blocks).
    let mut events = blockchain.read().notifier_as_stream();

    let slots: Vec<_> = blockchain.read().current_validators().unwrap().validators
        [validator.validator_slot_band() as usize]
        .slots
        .clone()
        .collect();

    let skip_block_info = SkipBlockInfo {
        block_number: 1,
        vrf_entropy: blockchain.read().head().seed().entropy(),
    };

    // Manually construct a skip block for the validator.
    let vc = create_skip_block_update(
        skip_block_info,
        validator.voting_key(),
        validator.validator_slot_band(),
        &slots,
    );

    // Let the validators run...
    tokio::spawn(future::join_all(validators));

    // ...while waiting for them to run into the block producer timeout (10s).
    time::sleep(Duration::from_secs(11)).await;

    // At which point the prepared skip block message is broadcast (only a subset of the
    // validators will accept it, as it is sent as a level 1 message).
    for network in &networks {
        for peer_id in network.get_peers() {
            network
                .message::<SkipBlockMessage>(SkipBlockMessage(vc.clone()), peer_id)
                .await
                .unwrap();
        }
    }

    // Wait enough time to complete the skip block aggregation (it really does not matter how
    // long, as long as the vc completes).
    time::sleep(Duration::from_secs(8)).await;

    // Reconnect a validator (who has not seen the proof for the skip block).
    log::warn!("connecting networks");
    Network::connect_networks(&networks, 9u64).await;

    // Wait for the new block producer to create a BlockchainEvent (which is always an Extended
    // event for block 1) and keep the hash.
    if let Some(BlockchainEvent::Extended(hash)) = events.next().await {
        // Retrieve the block for height 1.
        if let Ok(block) = blockchain.read().get_block_at(1, false, None) {
            // The hash needs to be the one the Extended event returned.
            // (The chain itself, i.e. blockchain.header_hash(), might have already progressed further.)
            assert_eq!(block.header().hash(), hash);
            // In that case, the validator producing this block has progressed past the 2nd skip
            // block without having seen the first skip block.
            return;
        }
    }
    assert!(false);
}

What should we do with these tests? Remove them? Try to fix them? I stumbled upon them while trying to run long-running tests.
