feat: health check in worker #1006
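Context for the diff below: the change wires the new `LoadBalancerState` into Pingora's background service machinery via `background_service("health_check", ...)`, presumably so worker health is probed outside the request path. In Pingora, a background service is any type implementing the `BackgroundService` trait. The following is a minimal sketch of what such an implementation might look like; the `health_check_frequency` field and the probing placeholder are illustrative assumptions, not this PR's actual code.

```rust
use async_trait::async_trait;
use pingora::server::ShutdownWatch;
use pingora::services::background::BackgroundService;
use std::time::Duration;

// Hypothetical stand-in for this PR's `LoadBalancerState`; the real type
// presumably also owns the worker backends and their health state.
pub struct LoadBalancerState {
    health_check_frequency: Duration,
}

#[async_trait]
impl BackgroundService for LoadBalancerState {
    // `background_service("health_check", state)` wraps the state in a
    // service; the server calls `start` once and drives it until shutdown.
    async fn start(&self, mut shutdown: ShutdownWatch) {
        let mut period = tokio::time::interval(self.health_check_frequency);
        loop {
            tokio::select! {
                // `ShutdownWatch` is a tokio watch receiver that changes
                // when the server begins shutting down.
                _ = shutdown.changed() => break,
                _ = period.tick() => {
                    // Probe each worker here and mark it healthy/unhealthy
                    // (elided; the actual probing logic lives in the PR).
                }
            }
        }
    }
}
```

Note how the diff's `health_check_service.task()` call hands back a shared handle (an `Arc`) to the same state, which is how the proxy side, `LoadBalancer(worker_lb)`, and the health checker end up sharing one view of the backends.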
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
```diff
@@ -1,8 +1,13 @@
 use clap::Parser;
-use pingora::{apps::HttpServerOptions, lb::Backend, prelude::Opt, server::Server};
+use pingora::{
+    apps::HttpServerOptions,
+    lb::Backend,
+    prelude::{background_service, Opt},
+    server::Server,
+};
 use pingora_proxy::http_proxy_service;
 
-use crate::proxy::LoadBalancer;
+use crate::proxy::{LoadBalancer, LoadBalancerState};
 
 /// Starts the proxy defined in the config file.
 #[derive(Debug, Parser)]
@@ -13,8 +18,8 @@ impl StartProxy {
     ///
     /// This method will first read the config file to get the list of workers to start. It will
     /// then start a proxy with each worker as a backend.
-    pub fn execute(&self) -> Result<(), String> {
-        let mut server = Server::new(Some(Opt::default())).expect("Failed to create server");
+    pub async fn execute(&self) -> Result<(), String> {
+        let mut server = Server::new(Some(Opt::default())).map_err(|err| err.to_string())?;
         server.bootstrap();
 
         let proxy_config = super::ProxyConfig::load_config_from_file()?;
@@ -23,25 +28,33 @@ impl StartProxy {
             .workers
             .iter()
             .map(|worker| format!("{}:{}", worker.host, worker.port))
-            .map(|worker| Backend::new(&worker).expect("Failed to create backend"))
-            .collect::<Vec<Backend>>();
+            .map(|worker| Backend::new(&worker).map_err(|err| err.to_string()))
+            .collect::<Result<Vec<Backend>, String>>()?;
 
-        let worker_lb = LoadBalancer::new(workers, &proxy_config);
+        let worker_lb = LoadBalancerState::new(workers, &proxy_config).await?;
+
+        let health_check_service = background_service("health_check", worker_lb);
+        let worker_lb = health_check_service.task();
 
         // Set up the load balancer
-        let mut lb = http_proxy_service(&server.configuration, worker_lb);
+        let mut lb = http_proxy_service(&server.configuration, LoadBalancer(worker_lb));
 
         let proxy_host = proxy_config.host;
         let proxy_port = proxy_config.port.to_string();
         lb.add_tcp(format!("{}:{}", proxy_host, proxy_port).as_str());
-        let logic = lb.app_logic_mut().expect("No app logic found");
+        let logic = lb.app_logic_mut().ok_or("Failed to get app logic")?;
         let mut http_server_options = HttpServerOptions::default();
 
         // Enable HTTP/2 for plaintext
         http_server_options.h2c = true;
         logic.server_options = Some(http_server_options);
 
+        server.add_service(health_check_service);
         server.add_service(lb);
-        server.run_forever();
+        tokio::task::spawn_blocking(|| server.run_forever())
+            .await
+            .map_err(|err| err.to_string())?;
 
         Ok(())
     }
 }
```

Comment on lines +54 to +56 (the `spawn_blocking` call):

> Why do we need to run `run_forever` inside `spawn_blocking`?

Reply:

> We do so because Pingora creates a new runtime inside `run_forever`, so calling it directly from the already-running async runtime would panic.
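To make the exchange above concrete, here is a standalone sketch (not this PR's code) of the constraint: tokio panics if a thread that is driving async tasks blocks on a second runtime, and `spawn_blocking` moves such a call onto the blocking thread pool, where blocking is allowed.

```rust
// `run_forever_like` stands in for Pingora's `Server::run_forever`, which
// (per the reply above) builds and blocks on its own runtime internally.
fn run_forever_like() {
    let rt = tokio::runtime::Runtime::new().expect("failed to build runtime");
    rt.block_on(async {
        // ... serve traffic until shutdown ...
    });
}

#[tokio::main]
async fn main() -> Result<(), String> {
    // Calling `run_forever_like()` right here would panic with
    // "Cannot start a runtime from within a runtime", because this thread
    // is a tokio worker. A blocking-pool thread may block freely.
    tokio::task::spawn_blocking(run_forever_like)
        .await
        .map_err(|err| err.to_string())
}
```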
Comment:

> Maybe this could be done as part of the incoming follow-up work, but we should probably check (unless it's being done somewhere already) that there are no duplicate workers at any point, both in the worker list and in the persisted config file, to avoid problems if the user accidentally adds the same address/port twice.
Reply:

> It is not being checked. The worker config is planned to be removed from the configuration file, though, so I think we can dismiss this for now and fix it as part of that issue.
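If such a check were added, a minimal version could collect each `host:port` string into a set and reject the first repeat. A sketch, where `Worker` is a hypothetical stand-in for the worker entry in the config (not this repo's actual type):

```rust
use std::collections::HashSet;

// Hypothetical worker config entry, mirroring the `host`/`port` fields
// used in the diff above.
struct Worker {
    host: String,
    port: u16,
}

// Fails on the first duplicated `host:port` pair.
fn check_no_duplicate_workers(workers: &[Worker]) -> Result<(), String> {
    let mut seen = HashSet::new();
    for worker in workers {
        let addr = format!("{}:{}", worker.host, worker.port);
        // `insert` returns false when the value was already present.
        if !seen.insert(addr.clone()) {
            return Err(format!("duplicate worker address: {addr}"));
        }
    }
    Ok(())
}

fn main() {
    let workers = vec![
        Worker { host: "127.0.0.1".into(), port: 8084 },
        Worker { host: "127.0.0.1".into(), port: 8084 },
    ];
    // The second entry repeats the first, so validation fails.
    assert!(check_no_duplicate_workers(&workers).is_err());
}
```

Running this check right after the worker list is read from the config would surface the misconfiguration at startup rather than at proxy time.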