From d5528d2da1d042f06f97d07999135d1b8d5af469 Mon Sep 17 00:00:00 2001
From: larry-aptos <112209412+larry-aptos@users.noreply.github.com>
Date: Tue, 3 Oct 2023 16:11:27 -0700
Subject: [PATCH] [indexer grpc] update the metrics for usage analysis.
 (#10383)

---
 .../indexer-grpc-data-service/src/metrics.rs | 17 +++----
 .../indexer-grpc-data-service/src/service.rs | 48 ++++++++++++++-----
 2 files changed, 44 insertions(+), 21 deletions(-)

diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs
index e5f1655179877..389d9e76e9a9b 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs
@@ -12,7 +12,7 @@ pub static LATEST_PROCESSED_VERSION: Lazy<IntGaugeVec> = Lazy::new(|| {
     register_int_gauge_vec!(
         "indexer_grpc_data_service_with_user_latest_processed_version",
         "Latest processed transaction version",
-        &["request_token", "email"],
+        &["request_token", "email", "processor"],
     )
     .unwrap()
 });
@@ -22,7+22,7 @@ pub static PROCESSED_VERSIONS_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
     register_int_counter_vec!(
         "indexer_grpc_data_service_with_user_processed_versions",
         "Number of transactions that have been processed by data service",
-        &["request_token", "email"],
+        &["request_token", "email", "processor"],
     )
     .unwrap()
 });
@@ -42,7 +42,7 @@ pub static PROCESSED_LATENCY_IN_SECS: Lazy<GaugeVec> = Lazy::new(|| {
     register_gauge_vec!(
         "indexer_grpc_data_service_with_user_latest_data_latency_in_secs",
         "Latency of data service based on latest processed transaction",
-        &["request_token", "email"],
+        &["request_token", "email", "processor"],
     )
     .unwrap()
 });
@@ -62,7 +62,7 @@ pub static PROCESSED_BATCH_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
     register_int_gauge_vec!(
         "indexer_grpc_data_service_with_user_processed_batch_size",
         "Size of latest processed batch by data service",
-        &["request_token", "email"],
+        &["request_token", "email", "processor"],
     )
     .unwrap()
 });
@@ -77,10 +77,11 @@ pub static CONNECTION_COUNT: Lazy<IntCounter> = Lazy::new(|| {
 });
 
 /// Count of the short connections; i.e., < 10 seconds.
-pub static SHORT_CONNECTION_COUNT: Lazy<IntCounter> = Lazy::new(|| {
-    register_int_counter!(
-        "indexer_grpc_data_service_short_connection_count",
+pub static SHORT_CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_grpc_data_service_short_connection_by_user_count",
         "Count of the short connections; i.e., < 10 seconds",
+        &["request_token", "email"],
     )
     .unwrap()
 });
@@ -91,7 +92,7 @@ pub static BYTES_READY_TO_TRANSFER_FROM_SERVER: Lazy<IntCounterVec> = Lazy::new(
     register_int_counter_vec!(
         "indexer_grpc_data_service_bytes_ready_to_transfer_from_server",
         "Count of bytes ready to transfer to the client",
-        &["request_token", "email"],
+        &["request_token", "email", "processor"],
     )
     .unwrap()
 });
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs
index 31dfbe6bb89f8..2cebdc2393b21 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs
@@ -38,11 +38,13 @@ type ResponseStream = Pin<Box<dyn Stream<Item = Result<TransactionsResponse, Status>> + Send>>;
                     BYTES_READY_TO_TRANSFER_FROM_SERVER
                         .with_label_values(&[
-                            request_metadata.request_token.as_str(),
+                            request_metadata.request_api_key_name.as_str(),
                             request_metadata.request_email.as_str(),
+                            request_metadata.processor_name.as_str(),
                         ])
                         .inc_by(bytes_ready_to_transfer as u64);
                     // 2. Push the data to the response channel, i.e. stream the data to the client.
@@ -293,20 +306,23 @@ impl RawData for RawDataServerWrapper {
                     Ok(_) => {
                         PROCESSED_BATCH_SIZE
                             .with_label_values(&[
-                                request_metadata.request_token.as_str(),
+                                request_metadata.request_api_key_name.as_str(),
                                 request_metadata.request_email.as_str(),
+                                request_metadata.processor_name.as_str(),
                             ])
                             .set(current_batch_size as i64);
                         LATEST_PROCESSED_VERSION
                             .with_label_values(&[
-                                request_metadata.request_token.as_str(),
+                                request_metadata.request_api_key_name.as_str(),
                                 request_metadata.request_email.as_str(),
+                                request_metadata.processor_name.as_str(),
                             ])
                             .set(end_of_batch_version as i64);
                         PROCESSED_VERSIONS_COUNT
                             .with_label_values(&[
-                                request_metadata.request_token.as_str(),
+                                request_metadata.request_api_key_name.as_str(),
                                 request_metadata.request_email.as_str(),
+                                request_metadata.processor_name.as_str(),
                             ])
                             .inc_by(current_batch_size as u64);
                         if let Some(data_latency_in_secs) = data_latency_in_secs {
@@ -315,8 +331,9 @@ impl RawData for RawDataServerWrapper {
                             if current_batch_size % BLOB_STORAGE_SIZE != 0 {
                                 PROCESSED_LATENCY_IN_SECS
                                     .with_label_values(&[
-                                        request_metadata.request_token.as_str(),
+                                        request_metadata.request_api_key_name.as_str(),
                                         request_metadata.request_email.as_str(),
+                                        request_metadata.processor_name.as_str(),
                                     ])
                                     .set(data_latency_in_secs);
                                 PROCESSED_LATENCY_IN_SECS_ALL
@@ -353,7 +370,12 @@ impl RawData for RawDataServerWrapper {
                     info!("[Indexer Data] Client disconnected.");
                     if let Some(start_time) = connection_start_time {
                         if start_time.elapsed().as_secs() < SHORT_CONNECTION_DURATION_IN_SECS {
-                            SHORT_CONNECTION_COUNT.inc();
+                            SHORT_CONNECTION_COUNT
+                                .with_label_values(&[
+                                    request_metadata.request_api_key_name.as_str(),
+                                    request_metadata.request_email.as_str(),
+                                ])
+                                .inc();
                         }
                     }
                 }
@@ -471,7 +493,7 @@ fn get_request_metadata(req: &Request<GetTransactionsRequest>) -> tonic::Result<
             REQUEST_HEADER_APTOS_USER_CLASSIFICATION_HEADER,
         ),
         ("request_token", GRPC_AUTH_TOKEN_HEADER),
-        ("request_name", GRPC_REQUEST_NAME_HEADER),
+        ("processor_name", GRPC_REQUEST_NAME_HEADER),
     ];
     let request_metadata_map: HashMap<String, String> = request_metadata_pairs
         .into_iter()
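
The patch adds a "processor" label to the per-user metrics and reports the API key name instead of the raw auth token. As a rough sketch of the resulting recording pattern (illustrative only, not part of the commit: the metric name example_processed_versions, the helper record_batch, and the assumption that aptos_metrics_core re-exports the prometheus register macros as used in metrics.rs above are all examples):

// Illustrative sketch only; the names below are hypothetical, not from the patch.
use aptos_metrics_core::{register_int_counter_vec, IntCounterVec};
use once_cell::sync::Lazy;

// A counter keyed by the same three labels the data service now uses.
static EXAMPLE_PROCESSED_VERSIONS: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "example_processed_versions",
        "Number of transactions processed, labeled for usage analysis",
        &["request_token", "email", "processor"],
    )
    .unwrap()
});

fn record_batch(api_key_name: &str, email: &str, processor: &str, batch_size: u64) {
    // Label values must be supplied in registration order; a mismatched count
    // panics at runtime, which is why every call site in the patch passes all three.
    EXAMPLE_PROCESSED_VERSIONS
        .with_label_values(&[api_key_name, email, processor])
        .inc_by(batch_size);
}

With the extra label in place, usage can be broken down per processor at query time, for example: sum by (processor) (rate(indexer_grpc_data_service_with_user_processed_versions[5m])).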