From c453975bd8c4533db574adf3566b377af3b34784 Mon Sep 17 00:00:00 2001
From: manudhundi
Date: Fri, 29 Sep 2023 15:36:27 -0700
Subject: [PATCH] [Sharded-Execution-GRPC] Address some review comments

---
 execution/executor-service/src/remote_state_view.rs | 6 ++++++
 execution/executor-service/src/tests.rs             | 1 -
 secure/net/src/grpc_network_service/mod.rs          | 9 ++++++---
 secure/net/src/network_controller/mod.rs            | 3 +++
 4 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/execution/executor-service/src/remote_state_view.rs b/execution/executor-service/src/remote_state_view.rs
index 62d256be3ae9d..0af5e85b2fccc 100644
--- a/execution/executor-service/src/remote_state_view.rs
+++ b/execution/executor-service/src/remote_state_view.rs
@@ -211,6 +211,12 @@ impl RemoteStateValueReceiver {
             .inner
             .into_iter()
             .for_each(|(state_key, state_value)| {
+                // Though we try to insert the state key during the 'pre_fetch_state_values' remote
+                // request, the state key may not yet be inserted by the time we receive and
+                // process the response here. To be on the safe side, we insert the state key
+                // here if it is not already present. This is harmless because we do not have
+                // versioning of the state view.
+                state_view_lock.insert_state_key(state_key.clone());
                 state_view_lock.set_state_value(&state_key, state_value);
             });
     }
diff --git a/execution/executor-service/src/tests.rs b/execution/executor-service/src/tests.rs
index 3f25c47639222..e206fedbc7167 100644
--- a/execution/executor-service/src/tests.rs
+++ b/execution/executor-service/src/tests.rs
@@ -55,7 +55,6 @@ pub fn create_thread_remote_executor_shards(
 }
 
 #[test]
-#[ignore]
 fn test_sharded_block_executor_no_conflict() {
     use std::thread;
 
diff --git a/secure/net/src/grpc_network_service/mod.rs b/secure/net/src/grpc_network_service/mod.rs
index 4244ffbb56b14..308913831aad2 100644
--- a/secure/net/src/grpc_network_service/mod.rs
+++ b/secure/net/src/grpc_network_service/mod.rs
@@ -57,9 +57,12 @@ impl GRPCNetworkMessageServiceServerWrapper {
             .unwrap();
         info!("Starting Server async at {:?}", server_addr);
 
-        // NOTE: serve_with_shutdown() starts the server, if successful the task does not return
-        // till the server is shutdown. Hence this should be called as a separate non-blocking task.
-        // Signal handler 'server_shutdown_rx' is needed to shutdown the server
+        // NOTE: (1) serve_with_shutdown() starts the server; if successful, the task does not
+        //           return till the server is shut down. Hence it must be spawned as a separate,
+        //           non-blocking task, and the 'server_shutdown_rx' signal handler is needed to
+        //           shut the server down.
+        //       (2) There is no easy way to know if/when the server has started successfully, so
+        //           we may need to implement a healthcheck service to check that it is up.
         Server::builder()
             .timeout(std::time::Duration::from_millis(rpc_timeout_ms))
             .add_service(NetworkMessageServiceServer::new(self))
diff --git a/secure/net/src/network_controller/mod.rs b/secure/net/src/network_controller/mod.rs
index 4ffb2f51b8d8b..daf79d21bab40 100644
--- a/secure/net/src/network_controller/mod.rs
+++ b/secure/net/src/network_controller/mod.rs
@@ -146,6 +146,9 @@ impl NetworkController {
         self.outbound_task_shutdown_tx = self.outbound_handler.start(&self.outbound_rpc_runtime);
    }
 
+    // TODO: This is still not a very clean shutdown: we do not wait for the full shutdown after
+    // sending the signal. That may not matter much for now, because we only shut down right
+    // before exiting the process, but ideally we want to fix this.
     pub fn shutdown(&mut self) {
         info!("Shutting down network controller at {}", self.listen_addr);
         if let Some(shutdown_signal) = self.inbound_server_shutdown_tx.take() {
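
Note on the remote_state_view.rs hunk: the safety of inserting the state key on the
response path rests on the insert being idempotent. Below is a minimal, self-contained
sketch of that pattern. The names RemoteStateView, insert_state_key, and set_state_value
mirror the patch, but the DashMap-based layout and the StateKey / StateValue /
RemoteStateValue stand-ins are illustrative assumptions, not the actual aptos-core types.

use dashmap::DashMap;

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct StateKey(String); // stand-in for the real StateKey type

#[derive(Clone, Debug)]
struct StateValue(Vec<u8>); // stand-in for the real StateValue type

#[derive(Clone, Debug)]
enum RemoteStateValue {
    Waiting,               // key registered, remote value not yet received
    Ready(Option<StateValue>),
}

struct RemoteStateView {
    state_values: DashMap<StateKey, RemoteStateValue>,
}

impl RemoteStateView {
    fn insert_state_key(&self, state_key: StateKey) {
        // entry().or_insert() writes only when the key is absent, so calling
        // this from both the prefetch path and the response path is safe:
        // whichever call loses the race is a no-op.
        self.state_values
            .entry(state_key)
            .or_insert(RemoteStateValue::Waiting);
    }

    fn set_state_value(&self, state_key: &StateKey, state_value: Option<StateValue>) {
        if let Some(mut slot) = self.state_values.get_mut(state_key) {
            *slot = RemoteStateValue::Ready(state_value);
        }
    }
}

Because the insert never overwrites an existing entry, ordering between the
'pre_fetch_state_values' request and the response handler stops mattering, which is
exactly what the new comment argues.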
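
Note on the grpc_network_service/mod.rs hunk: here is a hedged sketch of the
spawn-and-signal startup pattern that NOTE (1) describes, assuming tonic and tokio. The
tonic-health wiring is only one possible way to address NOTE (2); it is not part of this
patch, and the function name start_server_task is hypothetical.

use std::net::SocketAddr;
use tokio::sync::oneshot;
use tonic::transport::Server;

// Must be called from within a tokio runtime (tokio::spawn panics otherwise).
fn start_server_task(server_addr: SocketAddr) -> oneshot::Sender<()> {
    let (server_shutdown_tx, server_shutdown_rx) = oneshot::channel::<()>();

    // One possible answer to NOTE (2): tonic-health serves the standard
    // grpc.health.v1.Health API, which a client can poll to learn when the
    // server is actually up. The reporter handle (unused here) can later mark
    // individual services as SERVING / NOT_SERVING.
    let (_health_reporter, health_service) = tonic_health::server::health_reporter();

    // NOTE (1): serve_with_shutdown() does not return until the server shuts
    // down, so it runs on its own task. Sending on (or dropping)
    // 'server_shutdown_tx' completes the shutdown future and stops the server.
    tokio::spawn(async move {
        Server::builder()
            .add_service(health_service)
            .serve_with_shutdown(server_addr, async {
                server_shutdown_rx.await.ok();
            })
            .await
            .expect("gRPC server failed");
    });

    server_shutdown_tx
}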
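
Note on the network_controller/mod.rs TODO: one way to "wait for the full shutdown" is to
pair the shutdown signal with an acknowledgement channel that the server task completes
when it exits. A minimal sketch, assuming a hypothetical inbound_server_stopped_rx field
(only inbound_server_shutdown_tx appears in the patch), with std::sync::mpsc standing in
for whatever channel type the real code uses:

use std::sync::mpsc::{Receiver, Sender};

struct NetworkController {
    inbound_server_shutdown_tx: Option<Sender<()>>,
    // Hypothetical: the inbound server task signals on this channel (or simply
    // drops its Sender) once it has fully stopped.
    inbound_server_stopped_rx: Option<Receiver<()>>,
}

impl NetworkController {
    pub fn shutdown(&mut self) {
        // Send the shutdown signal, as the patch already does.
        if let Some(shutdown_signal) = self.inbound_server_shutdown_tx.take() {
            shutdown_signal.send(()).ok();
        }
        // New: block until the server task confirms it has exited. recv()
        // returns Err if the task dropped its Sender while exiting, which is
        // treated the same as an explicit acknowledgement.
        if let Some(stopped_rx) = self.inbound_server_stopped_rx.take() {
            stopped_rx.recv().ok();
        }
    }
}

This keeps shutdown() synchronous while closing the window the TODO describes, where the
process could proceed past shutdown() while the server task is still winding down.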