add validator catchup to multi-node test
sakridge committed Jun 29, 2018
1 parent ec56abf commit 7597d80
Showing 1 changed file with 94 additions and 3 deletions.
tests/multinode.rs: 97 changes (94 additions & 3 deletions)
@@ -50,7 +50,7 @@ fn converge(
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
) -> (Vec<ReplicatedData>, PublicKey) {
    //lets spy on the network
    let mut spy = TestNode::new();
    let daddr = "0.0.0.0:0".parse().unwrap();
@@ -90,7 +90,97 @@ fn converge(
        .filter(|x| x.id != me)
        .map(|x| x.clone())
        .collect();
    v.clone()
    (v.clone(), me)
}

#[test]
fn test_multi_node_validator_catchup_from_zero() {
    logger::setup();
    const N: usize = 5;
    trace!("test_multi_accountant_stub");
    let leader = TestNode::new();
    let alice = Mint::new(10_000);
    let bob_pubkey = KeyPair::new().pubkey();
    let exit = Arc::new(AtomicBool::new(false));

    let leader_bank = Bank::new(&alice);
    let server = Server::new_leader(
        leader_bank,
        0,
        None,
        leader.data.clone(),
        leader.sockets.requests,
        leader.sockets.transaction,
        leader.sockets.broadcast,
        leader.sockets.respond,
        leader.sockets.gossip,
        exit.clone(),
        sink(),
    );

    let mut threads = server.thread_hdls;
    for _ in 0..N {
        validator(&leader.data, exit.clone(), &alice, &mut threads);
    }
    let (servers, spy_id0) = converge(&leader.data, exit.clone(), N + 2, &mut threads);
    //contains the leader addr as well
    assert_eq!(servers.len(), N + 1);
    //verify leader can do transfer
    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
    assert_eq!(leader_balance, 500);
    //verify validator has the same balance
    let mut success = 0usize;
    for server in servers.iter() {
        info!("0server: {:?}", server.id[0]);
        let mut client = mk_client(server);
        if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
            info!("validator balance {}", bal);
            if bal == leader_balance {
                success += 1;
            }
        }
    }
    assert_eq!(success, servers.len());

    success = 0;
    // start up another validator, converge and then check everyone's balances
    validator(&leader.data, exit.clone(), &alice, &mut threads);
    let (servers, _) = converge(&leader.data, exit.clone(), N + 4, &mut threads);

    let mut leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
    info!("leader balance {}", leader_balance);
    loop {
        let mut client = mk_client(&leader.data);
        leader_balance = client.poll_get_balance(&bob_pubkey).unwrap();
        if leader_balance == 1000 {
            break;
        }
        sleep(Duration::from_millis(300));
    }
    assert_eq!(leader_balance, 1000);

    for server in servers.iter() {
        if server.id != spy_id0 {
            let mut client = mk_client(server);
            info!("1server: {:?}", server.id[0]);
            for _ in 0..10 {
                if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
                    info!("validator balance {}", bal);
                    if bal == leader_balance {
                        success += 1;
                        break;
                    }
                }
                sleep(Duration::from_millis(500));
            }
        }
    }
    assert_eq!(success, (servers.len() - 1));

    exit.store(true, Ordering::Relaxed);
    for t in threads {
        t.join().unwrap();
    }
}

#[test]
@@ -122,7 +212,7 @@ fn test_multi_node() {
    for _ in 0..N {
        validator(&leader.data, exit.clone(), &alice, &mut threads);
    }
    let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
    let (servers, _) = converge(&leader.data, exit.clone(), N + 2, &mut threads);
    //contains the leader addr as well
    assert_eq!(servers.len(), N + 1);
    //verify leader can do transfer
@@ -140,6 +230,7 @@ fn test_multi_node() {
        }
    }
    assert_eq!(success, servers.len());

    exit.store(true, Ordering::Relaxed);
    for t in threads {
        t.join().unwrap();
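Both catchup checks in this commit boil down to the same pattern: poll a node's balance via `mk_client`/`poll_get_balance`, sleep between attempts, and stop once the value matches the leader's. The sketch below is not part of this commit; it is a minimal, self-contained illustration of that retry-until-equal pattern, with a hypothetical helper name (`retry_until_eq`) and a toy closure standing in for the real client call.

```rust
use std::thread::sleep;
use std::time::Duration;

/// Hypothetical helper: call `poll` up to `retries` times, sleeping between
/// attempts, and report whether it ever returned the expected value.
fn retry_until_eq<T, F>(mut poll: F, expected: T, retries: usize, delay: Duration) -> bool
where
    T: PartialEq,
    F: FnMut() -> Option<T>,
{
    for _ in 0..retries {
        if let Some(value) = poll() {
            if value == expected {
                return true;
            }
        }
        sleep(delay);
    }
    false
}

fn main() {
    // Toy stand-in for `client.poll_get_balance(&bob_pubkey)`: succeeds on the third call.
    let mut calls = 0;
    let caught_up = retry_until_eq(
        || {
            calls += 1;
            if calls >= 3 {
                Some(1000)
            } else {
                None
            }
        },
        1000,
        10,
        Duration::from_millis(10),
    );
    assert!(caught_up);
}
```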
