Remove witness retrying logic on missing block
Sometimes, when we would like to process a ChunkStateWitness, the block
required to process it isn't available yet, and we have to wait for it.
near#10535 implemented a hacky way to do this by retrying the processing
every 500ms until the required block arrives. A proper solution will be
implemented separately, so let's remove the hacky workaround.
jancionear committed Feb 21, 2024
1 parent b49613d commit 05d44fa
Showing 3 changed files with 8 additions and 38 deletions.
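
For context, here is a minimal, self-contained sketch of the before/after behaviour described in the commit message. This is not nearcore code: `Witness`, `Block`, `MissingBlock`, and `get_block` are hypothetical stand-ins, and the retry interval mirrors the 500ms mentioned above.

```rust
use std::{thread, time::Duration};

struct Witness;
struct Block;

#[derive(Debug)]
struct MissingBlock;

// Stand-in for looking up the required block; pretend it never arrives.
fn get_block() -> Result<Block, MissingBlock> {
    Err(MissingBlock)
}

// Old approach (the hack being removed): poll for the block on a timer,
// giving up after a fixed number of attempts.
fn process_with_retries(_witness: &Witness, mut attempts: usize) -> Result<(), MissingBlock> {
    loop {
        match get_block() {
            Ok(_block) => return Ok(()), // block arrived, process the witness
            Err(e) => {
                if attempts == 0 {
                    return Err(e); // exhausted all retries
                }
                attempts -= 1;
                thread::sleep(Duration::from_millis(500));
            }
        }
    }
}

// New approach after this commit: report the missing block immediately
// and let the caller decide what to do.
fn process(_witness: &Witness) -> Result<(), MissingBlock> {
    let _block = get_block()?;
    Ok(())
}

fn main() {
    let w = Witness;
    assert!(process_with_retries(&w, 2).is_err());
    assert!(process(&w).is_err());
}
```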
chain/client/src/adapter.rs (6 changes: 1 addition & 5 deletions)
@@ -141,7 +141,6 @@ pub enum ProcessTxResponse {
 pub struct ChunkStateWitnessMessage {
     pub witness: ChunkStateWitness,
     pub peer_id: PeerId,
-    pub attempts_remaining: usize,
 }
 
 #[derive(actix::Message, Debug)]
@@ -350,10 +349,7 @@ impl near_network::client::Client for Adapter {
     async fn chunk_state_witness(&self, witness: ChunkStateWitness, peer_id: PeerId) {
         match self
             .client_addr
-            .send(
-                ChunkStateWitnessMessage { witness, peer_id, attempts_remaining: 5 }
-                    .with_span_context(),
-            )
+            .send(ChunkStateWitnessMessage { witness, peer_id }.with_span_context())
             .await
         {
             Ok(()) => {}
chain/client/src/client_actor.rs (27 changes: 3 additions & 24 deletions)
@@ -2006,32 +2006,11 @@ impl Handler<WithSpanContext<ChunkStateWitnessMessage>> for ClientActor {
     fn handle(
         &mut self,
         msg: WithSpanContext<ChunkStateWitnessMessage>,
-        ctx: &mut Context<Self>,
+        _ctx: &mut Context<Self>,
     ) -> Self::Result {
         let (_span, msg) = handler_debug_span!(target: "client", msg);
-        let peer_id = msg.peer_id.clone();
-        let attempts_remaining = msg.attempts_remaining;
-        match self.client.process_chunk_state_witness(msg.witness, msg.peer_id, None) {
-            Err(err) => {
-                tracing::error!(target: "client", ?err, "Error processing chunk state witness");
-            }
-            Ok(Some(witness)) => {
-                if attempts_remaining > 0 {
-                    ctx.run_later(Duration::from_millis(100), move |_, ctx| {
-                        ctx.address().do_send(
-                            ChunkStateWitnessMessage {
-                                witness,
-                                peer_id,
-                                attempts_remaining: attempts_remaining - 1,
-                            }
-                            .with_span_context(),
-                        );
-                    });
-                } else {
-                    tracing::error!(target: "client", "Failed to process chunk state witness even after 5 tries due to missing parent block");
-                }
-            }
-            Ok(None) => {}
+        if let Err(err) = self.client.process_chunk_state_witness(msg.witness, msg.peer_id, None) {
+            tracing::error!(target: "client", ?err, "Error processing chunk state witness");
         }
     }
 }
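
The deleted branch above relied on actix's `ctx.run_later` to re-send the message to the actor itself with a decremented attempt counter. A minimal sketch of that re-enqueue pattern in plain actix, with illustrative types (`Worker`, `Retryable`) rather than nearcore's:

```rust
use std::time::Duration;

use actix::prelude::*;

#[derive(Message)]
#[rtype(result = "()")]
struct Retryable {
    attempts_remaining: usize,
}

struct Worker;

impl Actor for Worker {
    type Context = Context<Self>;
}

impl Handler<Retryable> for Worker {
    type Result = ();

    fn handle(&mut self, msg: Retryable, ctx: &mut Context<Self>) -> Self::Result {
        // Stand-in for "is the required block available yet?".
        let ready = false;

        if ready {
            return; // precondition met: process the message for real
        }
        if msg.attempts_remaining == 0 {
            eprintln!("giving up: precondition still not met");
            return;
        }
        // Re-send the same message to ourselves after a delay.
        ctx.run_later(Duration::from_millis(100), move |_actor, ctx| {
            ctx.address().do_send(Retryable {
                attempts_remaining: msg.attempts_remaining - 1,
            });
        });
    }
}
```

The drawback of this pattern, and the reason the commit deletes it, is that it polls on a fixed timer instead of reacting to the block's arrival, and it silently drops the witness once the attempts run out.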
chain/client/src/stateless_validation/chunk_validator.rs (13 changes: 4 additions & 9 deletions)
@@ -588,18 +588,13 @@ impl Client {
         witness: ChunkStateWitness,
         peer_id: PeerId,
         processing_done_tracker: Option<ProcessingDoneTracker>,
-    ) -> Result<Option<ChunkStateWitness>, Error> {
+    ) -> Result<(), Error> {
         // TODO(#10502): Handle production of state witness for first chunk after genesis.
         // Properly handle case for chunk right after genesis.
         // Context: We are currently unable to handle production of the state witness for the
         // first chunk after genesis as it's not possible to run the genesis chunk in runtime.
         let prev_block_hash = witness.inner.chunk_header.prev_block_hash();
-        let prev_block = match self.chain.get_block(prev_block_hash) {
-            Ok(block) => block,
-            Err(_) => {
-                return Ok(Some(witness));
-            }
-        };
+        let prev_block = self.chain.get_block(prev_block_hash)?;
         let prev_chunk_header = Chain::get_prev_chunk_header(
             self.epoch_manager.as_ref(),
             &prev_block,
@@ -616,7 +611,7 @@
                 &self.chunk_validator.network_sender,
                 self.chunk_endorsement_tracker.as_ref(),
             );
-            return Ok(None);
+            return Ok(());
         }
 
         // TODO(#10265): If the previous block does not exist, we should
@@ -635,6 +630,6 @@
                 },
             ));
         }
-        result.map(|_| None)
+        result
     }
 }
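
The signature change is the heart of this file: `Ok(Some(witness))` used to be a third, non-error outcome meaning "parent block missing, hand the witness back for a retry". Once a missing block is propagated as an ordinary error via `?`, the return type collapses to `Result<(), Error>`. A small sketch with placeholder types (not nearcore's):

```rust
struct Block;
struct Witness;

#[derive(Debug)]
struct Error;

// Stand-in for `self.chain.get_block`; pretend the block hasn't arrived.
fn get_block() -> Result<Block, Error> {
    Err(Error)
}

// Before: a missing block was swallowed and the witness handed back so
// the caller could re-queue it ("Ok(Some(w))" meant "retry later").
fn process_old(witness: Witness) -> Result<Option<Witness>, Error> {
    let _prev_block = match get_block() {
        Ok(block) => block,
        Err(_) => return Ok(Some(witness)),
    };
    Ok(None)
}

// After: `?` propagates the missing block to the caller as an error.
fn process_new(_witness: Witness) -> Result<(), Error> {
    let _prev_block = get_block()?;
    Ok(())
}

fn main() {
    assert!(matches!(process_old(Witness), Ok(Some(_))));
    assert!(process_new(Witness).is_err());
}
```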
