diff --git a/.vscode/settings.json b/.vscode/settings.json index f9d7bc1bf..f689b66dc 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -11,6 +11,8 @@ "protobuf", "Substate", "swdd", + "templating", + "unrendered", "utest", "VecDeque" ], diff --git a/Cargo.lock b/Cargo.lock index d6ba9d590..6b90b1a65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -108,6 +108,7 @@ dependencies = [ "common", "env_logger", "grpc", + "handlebars", "log", "mockall", "mockall_double", @@ -821,6 +822,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlebars" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25b617d1375ef96eeb920ae717e3da34a02fc979fe632c75128350f9e1f74a" +dependencies = [ + "log", + "pest", + "pest_derive", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -1335,6 +1350,51 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.75", +] + +[[package]] +name = "pest_meta" +version = "2.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" +dependencies = [ + "once_cell", + "pest", + "sha2", +] + [[package]] name = "petgraph" version = "0.6.5" @@ -2213,6 +2273,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + [[package]] name = "umask" version = "2.1.0" diff --git a/agent/src/cli.rs b/agent/src/cli.rs index 31556e6e6..69d7156ad 100644 --- a/agent/src/cli.rs +++ b/agent/src/cli.rs @@ -19,7 +19,7 @@ use std::path::Path; use crate::control_interface::Directory; use crate::control_interface::FileSystemError; use clap::Parser; -use common::objects::state::STR_RE_AGENT; +use common::objects::STR_RE_AGENT; use common::DEFAULT_SERVER_ADDRESS; const DEFAULT_RUN_FOLDER: &str = "/tmp/ankaios/"; diff --git a/agent/src/runtime_manager.rs b/agent/src/runtime_manager.rs index 6eb8c656e..3a290c288 100644 --- a/agent/src/runtime_manager.rs +++ b/agent/src/runtime_manager.rs @@ -2054,11 +2054,11 @@ mod tests { dependencies: Some(ank_base::Dependencies { dependencies: HashMap::from([ ( - "workload A".to_string(), + "workload_A".to_string(), AddCondition::AddCondRunning as i32, ), ( - "workload C".to_string(), + "workload_C".to_string(), AddCondition::AddCondSucceeded as i32, ), ]), diff --git 
a/ank/src/cli.rs b/ank/src/cli.rs index 39bc50b41..ac539101b 100644 --- a/ank/src/cli.rs +++ b/ank/src/cli.rs @@ -63,6 +63,7 @@ fn workload_completer(current: &OsStr) -> Vec { fn completions_object_field_mask(state: Vec, current: &OsStr) -> Vec { const DESIRED_STATE: &str = "desiredState"; const WORKLOADS: &str = "workloads"; + const CONFIGS: &str = "configs"; const WORKLOAD_STATES: &str = "workloadStates"; let mut result = Vec::new(); @@ -79,6 +80,12 @@ fn completions_object_field_mask(state: Vec, current: &OsStr) -> Vec Result<(Object, Vecswdd~cli-apply-ankaios-manifest-agent-name-overwrite~1] pub fn handle_agent_overwrite( filter_masks: &Vec, - desired_agent: &Option, + cli_specified_agent_name: &Option, mut state_obj: Object, ) -> Result { - // No agent name specified through cli! - if desired_agent.is_none() { - // [impl->swdd~cli-apply-ankaios-manifest-error-on-agent-name-absence~1] - for field in filter_masks { - let path = &format!("{}.agent", String::from(field)); - if state_obj.get(&path.into()).is_none() { + for mask_path in filter_masks { + if mask_path.parts().starts_with(&["workloads".into()]) { + let workload_agent_mask: Path = format!("{}.agent", String::from(mask_path)).into(); + if let Some(agent_name) = cli_specified_agent_name { + // An agent name specified through cli -> do an agent name overwrite! + state_obj + .set( + &workload_agent_mask, + serde_yaml::Value::String(agent_name.to_owned()), + ) + .map_err(|_| "Could not find workload to update.".to_owned())?; + } else if state_obj.get(&workload_agent_mask).is_none() { + // No agent name specified through cli and inside workload configuration! + // [impl->swdd~cli-apply-ankaios-manifest-error-on-agent-name-absence~1] return Err( "No agent name specified -> use '--agent' option to specify!".to_owned(), ); } } } - // An agent name specified through cli -> do an agent name overwrite! 
- else { - let desired_agent_name = desired_agent.as_ref().unwrap().to_string(); - for field in filter_masks { - let path = &format!("{}.agent", String::from(field)); - if state_obj - .set( - &path.into(), - serde_yaml::Value::String(desired_agent_name.to_owned()), - ) - .is_err() - { - return Err("Could not find workload to update.".to_owned()); - } - } - } - state_obj .try_into() .map_err(|err| format!("Invalid manifest data provided: {}", err)) @@ -133,7 +125,7 @@ pub fn create_filter_masks_from_paths( pub fn generate_state_obj_and_filter_masks_from_manifests( manifests: &mut [InputSourcePair], apply_args: &ApplyArgs, -) -> Result<(CompleteState, Vec), String> { +) -> Result)>, String> { let mut req_obj: Object = State::default().try_into().unwrap(); let mut req_paths: Vec = Vec::new(); for manifest in manifests.iter_mut() { @@ -145,9 +137,10 @@ pub fn generate_state_obj_and_filter_masks_from_manifests( } if req_paths.is_empty() { - return Err("No workload provided in manifests!".to_owned()); + return Ok(None); } + output_debug!("req_paths:\n{:?}\n", req_paths); let filter_masks = create_filter_masks_from_paths(&req_paths, "desiredState"); output_debug!("\nfilter_masks:\n{:?}\n", filter_masks); @@ -165,7 +158,7 @@ pub fn generate_state_obj_and_filter_masks_from_manifests( }; output_debug!("\nstate_obj:\n{:?}\n", complete_state_req_obj); - Ok((complete_state_req_obj, filter_masks)) + Ok(Some((complete_state_req_obj, filter_masks))) } impl CliCommands { @@ -173,13 +166,17 @@ impl CliCommands { pub async fn apply_manifests(&mut self, apply_args: ApplyArgs) -> Result<(), CliError> { match get_input_sources(&apply_args.manifest_files) { Ok(mut manifests) => { - let (complete_state_req_obj, filter_masks) = + if let Some((complete_state_req_obj, filter_masks)) = generate_state_obj_and_filter_masks_from_manifests(&mut manifests, &apply_args) - .map_err(CliError::ExecutionError)?; - - // [impl->swdd~cli-apply-send-update-state~1] - self.update_state_and_wait_for_complete(complete_state_req_obj, filter_masks) - .await + .map_err(CliError::ExecutionError)? 
+ { + // [impl->swdd~cli-apply-send-update-state~1] + self.update_state_and_wait_for_complete(complete_state_req_obj, filter_masks) + .await + } else { + output!("Nothing to update."); + Ok(()) + } } Err(err) => Err(CliError::ExecutionError(err.to_string())), } @@ -490,6 +487,38 @@ mod tests { ); } + // [utest->swdd~cli-apply-ankaios-manifest-agent-name-overwrite~1] + #[test] + fn utest_handle_agent_overwrite_considers_only_workloads() { + let state = test_utils::generate_test_state_from_workloads(vec![ + generate_test_workload_spec_with_param( + "agent_A".to_string(), + "wl1".to_string(), + "runtime_X".to_string(), + ), + ]); + + let expected_state = test_utils::generate_test_state_from_workloads(vec![ + generate_test_workload_spec_with_param( + "agent_A".to_string(), + "wl1".to_string(), + "runtime_X".to_string(), + ), + ]); + + let cli_specified_agent_name = None; + + assert_eq!( + handle_agent_overwrite( + &vec!["workloads.wl1".into(), "configs.config_key".into()], + &cli_specified_agent_name, + state.try_into().unwrap(), + ) + .unwrap(), + expected_state + ); + } + // [utest->swdd~cli-apply-generates-state-object-from-ankaios-manifests~1] // [utest->swdd~cli-apply-generates-filter-masks-from-ankaios-manifests~1] #[test] @@ -526,7 +555,7 @@ mod tests { vec![(manifest_file_name.to_string(), Box::new(manifest_content))]; assert_eq!( - Ok((expected_complete_state_obj, expected_filter_masks)), + Ok(Some((expected_complete_state_obj, expected_filter_masks))), generate_state_obj_and_filter_masks_from_manifests( &mut manifests[..], &ApplyArgs { @@ -563,27 +592,7 @@ mod tests { vec![(manifest_file_name.to_string(), Box::new(manifest_content))]; assert_eq!( - Ok((expected_complete_state_obj, expected_filter_masks)), - generate_state_obj_and_filter_masks_from_manifests( - &mut manifests[..], - &ApplyArgs { - agent_name: None, - manifest_files: vec![manifest_file_name.to_string()], - delete_mode: true, - }, - ) - ); - } - - #[test] - fn utest_generate_state_obj_and_filter_masks_from_manifests_no_workload_provided() { - let manifest_file_name = "manifest.yaml"; - let manifest_content = io::Cursor::new(b"apiVersion: \"v0.1\""); - let mut manifests: Vec = - vec![(manifest_file_name.to_string(), Box::new(manifest_content))]; - - assert_eq!( - Err("No workload provided in manifests!".to_string()), + Ok(Some((expected_complete_state_obj, expected_filter_masks))), generate_state_obj_and_filter_masks_from_manifests( &mut manifests[..], &ApplyArgs { @@ -685,7 +694,7 @@ mod tests { //[utest->swdd~cli-apply-send-update-state~1] // [utest->swdd~cli-watches-workloads~1] #[tokio::test] - async fn utest_apply_manifests_ok() { + async fn utest_apply_manifests_workloads_updated_ok() { let _guard = crate::test_helper::MOCKALL_CONTEXT_SYNC .get_lock_async() .await; @@ -794,6 +803,119 @@ mod tests { assert!(apply_result.is_ok()); } + // [utest->swdd~cli-apply-generates-state-object-from-ankaios-manifests~1] + // [utest->swdd~cli-apply-generates-filter-masks-from-ankaios-manifests~1] + // [utest->swdd~cli-apply-send-update-state~1] + #[tokio::test] + async fn utest_apply_manifests_only_configs_to_update_ok() { + let _guard = crate::test_helper::MOCKALL_CONTEXT_SYNC + .get_lock_async() + .await; + + let manifest_content = io::Cursor::new( + b"apiVersion: \"v0.1\"\nworkloads: {}\nconfigs:\n config_1: config_value_1", + ); + + let mut manifest_data = String::new(); + let _ = manifest_content.clone().read_to_string(&mut manifest_data); + + let updated_state = CompleteState { + desired_state: 
serde_yaml::from_str(&manifest_data).unwrap(), + ..Default::default() + }; + + let mut mock_server_connection = MockServerConnection::default(); + mock_server_connection + .expect_update_state() + .with( + eq(updated_state.clone()), + eq(vec!["desiredState.configs.config_1".to_string()]), + ) + .return_once(|_, _| Ok(UpdateStateSuccess::default())); + + mock_server_connection + .expect_get_complete_state() + .return_once(|_| Ok(FilteredCompleteState::default())); + + mock_server_connection + .expect_take_missed_from_server_messages() + .never(); + + mock_server_connection + .expect_read_next_update_workload_state() + .never(); + + let mut cmd = CliCommands { + _response_timeout_ms: RESPONSE_TIMEOUT_MS, + no_wait: true, + server_connection: mock_server_connection, + }; + + FAKE_GET_INPUT_SOURCE_MOCK_RESULT_LIST + .lock() + .unwrap() + .push_back(Ok(vec![( + "manifest.yml".to_string(), + Box::new(manifest_content), + )])); + + let apply_result = cmd + .apply_manifests(ApplyArgs { + agent_name: None, + delete_mode: false, + manifest_files: vec!["manifest_yaml".to_string()], + }) + .await; + assert!(apply_result.is_ok()); + } + + // [utest->swdd~cli-apply-generates-state-object-from-ankaios-manifests~1] + // [utest->swdd~cli-apply-generates-filter-masks-from-ankaios-manifests~1] + #[tokio::test] + async fn utest_apply_manifests_nothing_to_update_ok() { + let _guard = crate::test_helper::MOCKALL_CONTEXT_SYNC + .get_lock_async() + .await; + + let manifest_content = io::Cursor::new(b"apiVersion: \"v0.1\""); + + let mut mock_server_connection = MockServerConnection::default(); + mock_server_connection.expect_update_state().never(); + + mock_server_connection.expect_get_complete_state().never(); + + mock_server_connection + .expect_take_missed_from_server_messages() + .never(); + + mock_server_connection + .expect_read_next_update_workload_state() + .never(); + + let mut cmd = CliCommands { + _response_timeout_ms: RESPONSE_TIMEOUT_MS, + no_wait: false, + server_connection: mock_server_connection, + }; + + FAKE_GET_INPUT_SOURCE_MOCK_RESULT_LIST + .lock() + .unwrap() + .push_back(Ok(vec![( + "manifest.yml".to_string(), + Box::new(manifest_content), + )])); + + let apply_result = cmd + .apply_manifests(ApplyArgs { + agent_name: None, + delete_mode: false, + manifest_files: vec!["manifest_yaml".to_string()], + }) + .await; + assert!(apply_result.is_ok()); + } + #[tokio::test] async fn utest_apply_manifest_invalid_names() { let _guard = crate::test_helper::MOCKALL_CONTEXT_SYNC diff --git a/common/doc/swdesign/README.md b/common/doc/swdesign/README.md index 60f1a4ab0..9a86f6d87 100644 --- a/common/doc/swdesign/README.md +++ b/common/doc/swdesign/README.md @@ -347,7 +347,7 @@ The Common library shall provide functionality for enforcing a workload name to: * have a maximal length of 63 characters Rationale: -A consistent naming manner assures stability in usage, compatibility with Ankaios internal structure by ensuring proper function of the filtering. +A consistent naming manner assures stability in usage and compatibility with Ankaios internal structure by ensuring proper function of the filtering. Tags: - Objects @@ -365,7 +365,43 @@ Status: approved The Common library shall provide functionality for enforcing an agent name to contain only regular upper and lowercase characters (a-z and A-Z), numbers and the symbols "-" and "_". Rationale: -A consistent naming manner assures stability in usage, compatibility with Ankaios internal structure by ensuring proper function of the filtering. 
+A consistent naming manner assures stability in usage and compatibility with Ankaios internal structure by ensuring proper function of the filtering. + +Tags: +- Objects + +Needs: +- impl +- utest +- stest + +#### Config item key naming convention +`swdd~common-config-item-key-naming-convention~1` + +Status: approved + +The Common library shall provide functionality for enforcing a config item key to contain only regular upper and lowercase characters (a-z and A-Z), numbers and the symbols "-" and "_". + +Rationale: +A consistent naming manner assures stability in usage and compatibility with Ankaios internal structure by ensuring proper function of the filtering. + +Tags: +- Objects + +Needs: +- impl +- utest +- stest + +#### Config aliases and referenced config keys naming convention +`swdd~common-config-aliases-and-config-reference-keys-naming-convention~1` + +Status: approved + +The Common library shall provide functionality for enforcing a workload's config reference key value pairs to contain only regular upper and lowercase characters (a-z and A-Z), numbers and the symbols "-" and "_". + +Rationale: +A consistent naming manner assures stability in usage and compatibility with Ankaios internal structure by ensuring proper function of the filtering. Tags: - Objects diff --git a/common/src/objects/config.rs b/common/src/objects/config.rs index 7283e7d38..0c36e15c2 100644 --- a/common/src/objects/config.rs +++ b/common/src/objects/config.rs @@ -83,6 +83,41 @@ impl TryFrom for ConfigItem { // ## ####### ######### ## // ////////////////////////////////////////////////////////////////////////////// +#[cfg(any(feature = "test_utils", test))] +pub fn generate_test_configs() -> HashMap { + HashMap::from([ + ( + "config_1".to_string(), + ConfigItem::ConfigObject(HashMap::from([ + ( + "values".to_string(), + ConfigItem::ConfigObject(HashMap::from([ + ( + "value_1".to_string(), + ConfigItem::String("value123".to_string()), + ), + ( + "value_2".to_string(), + ConfigItem::ConfigArray(vec![ + ConfigItem::String("list_value_1".to_string()), + ConfigItem::String("list_value_2".to_string()), + ]), + ), + ])), + ), + ( + "agent_name".to_string(), + ConfigItem::String("agent_A".to_owned()), + ), + ])), + ), + ( + "config_2".to_string(), + ConfigItem::String("value_3".to_string()), + ), + ]) +} + #[cfg(test)] mod tests { use api::ank_base; diff --git a/common/src/objects/mod.rs b/common/src/objects/mod.rs index 9fe12e9c7..8120a1e84 100644 --- a/common/src/objects/mod.rs +++ b/common/src/objects/mod.rs @@ -18,7 +18,6 @@ pub mod state; pub use state::State; -pub use state::{STR_RE_AGENT, STR_RE_WORKLOAD}; mod complete_state; pub use complete_state::CompleteState; @@ -41,7 +40,7 @@ pub use stored_workload_spec::{ generate_test_stored_workload_spec, generate_test_stored_workload_spec_with_config, }; -pub use stored_workload_spec::StoredWorkloadSpec; +pub use stored_workload_spec::{StoredWorkloadSpec, STR_RE_CONFIG_REFERENCES}; mod workload_state; #[cfg(any(feature = "test_utils", test))] @@ -62,6 +61,7 @@ pub use workload_spec::{ generate_test_workload_spec_with_dependencies, generate_test_workload_spec_with_param, generate_test_workload_spec_with_runtime_config, }; +pub use workload_spec::{STR_RE_AGENT, STR_RE_WORKLOAD}; pub use workload_spec::{ get_workloads_per_agent, AddCondition, DeleteCondition, DeletedWorkload, @@ -87,4 +87,6 @@ pub use control_interface_access::{ }; mod config; +#[cfg(any(feature = "test_utils", test))] +pub use config::generate_test_configs; pub use config::ConfigItem; diff --git 
a/common/src/objects/state.rs b/common/src/objects/state.rs index 7cbdcd450..81cbf50a8 100644 --- a/common/src/objects/state.rs +++ b/common/src/objects/state.rs @@ -11,22 +11,18 @@ // under the License. // // SPDX-License-Identifier: Apache-2.0 - -use regex::Regex; use serde::{Deserialize, Serialize}; +use regex::Regex; use std::collections::HashMap; use crate::helpers::serialize_to_ordered_map; use crate::objects::ConfigItem; -use crate::objects::StoredWorkloadSpec; +use crate::objects::{StoredWorkloadSpec, STR_RE_CONFIG_REFERENCES}; use api::ank_base; const CURRENT_API_VERSION: &str = "v0.1"; -const MAX_CHARACTERS_WORKLOAD_NAME: usize = 63; -pub const STR_RE_WORKLOAD: &str = r"^[a-zA-Z0-9_-]+*$"; -pub const STR_RE_AGENT: &str = r"^[a-zA-Z0-9_-]*$"; // [impl->swdd~common-object-representation~1] // [impl->swdd~common-object-serialization~1] @@ -97,42 +93,34 @@ impl TryFrom for State { } impl State { - // [impl->swdd~common-workload-naming-convention~1] - // [impl->swdd~common-agent-naming-convention~1] - pub fn verify_format(provided_state: &State) -> Result<(), String> { + pub fn verify_api_version(provided_state: &State) -> Result<(), String> { if provided_state.api_version != CURRENT_API_VERSION { - return Err(format!( + Err(format!( "Unsupported API version. Received '{}', expected '{}'", provided_state.api_version, State::default().api_version - )); + )) + } else { + Ok(()) } + } - let re_workloads = Regex::new(STR_RE_WORKLOAD).unwrap(); - let re_agent = Regex::new(STR_RE_AGENT).unwrap(); - - for (workload_name, workload_spec) in &provided_state.workloads { - if !re_workloads.is_match(workload_name.as_str()) { - return Err(format!( - "Unsupported workload name. Received '{}', expected to have characters in {}", - workload_name, STR_RE_WORKLOAD - )); - } - if workload_name.len() > MAX_CHARACTERS_WORKLOAD_NAME { + // [impl->swdd~common-config-item-key-naming-convention~1] + pub fn verify_configs_format(provided_state: &State) -> Result<(), String> { + let re_config_items = Regex::new(STR_RE_CONFIG_REFERENCES).unwrap(); + for config_key in provided_state.configs.keys() { + if !re_config_items.is_match(config_key.as_str()) { return Err(format!( - "Workload name length {} exceeds the maximum limit of {} characters", - workload_name.len(), - MAX_CHARACTERS_WORKLOAD_NAME - )); - } - if !re_agent.is_match(workload_spec.agent.as_str()) { - return Err(format!( - "Unsupported agent name. Received '{}', expected to have characters in {}", - workload_spec.agent, STR_RE_AGENT + "Unsupported config item key. 
Received '{}', expected to have characters in {}", + config_key, STR_RE_CONFIG_REFERENCES )); } } + for workload in provided_state.workloads.values() { + // [impl->swdd~common-config-aliases-and-config-reference-keys-naming-convention~1] + StoredWorkloadSpec::verify_config_reference_format(&workload.configs)?; + } Ok(()) } } @@ -150,16 +138,19 @@ impl State { // [utest->swdd~common-object-serialization~1] #[cfg(test)] mod tests { - - use super::{CURRENT_API_VERSION, MAX_CHARACTERS_WORKLOAD_NAME, STR_RE_AGENT, STR_RE_WORKLOAD}; use api::ank_base; use std::collections::HashMap; use crate::{ - objects::{State, StoredWorkloadSpec}, + objects::{generate_test_configs, generate_test_stored_workload_spec, ConfigItem, State}, test_utils::{generate_test_proto_state, generate_test_state}, }; + const WORKLOAD_NAME_1: &str = "workload_1"; + const AGENT_A: &str = "agent_A"; + const RUNTIME: &str = "runtime"; + const INVALID_CONFIG_KEY: &str = "invalid%key"; + #[test] fn utest_converts_to_proto_state() { let ankaios_state = generate_test_state(); @@ -194,10 +185,8 @@ mod tests { #[test] fn utest_state_accepts_compatible_state() { - let state_compatible_version = State { - ..Default::default() - }; - assert_eq!(State::verify_format(&state_compatible_version), Ok(())); + let state_compatible_version = State::default(); + assert_eq!(State::verify_api_version(&state_compatible_version), Ok(())); } #[test] @@ -208,94 +197,93 @@ mod tests { ..Default::default() }; assert_eq!( - State::verify_format(&state_incompatible_version), + State::verify_api_version(&state_incompatible_version), Err(format!( "Unsupported API version. Received '{}', expected '{}'", - api_version, CURRENT_API_VERSION + api_version, + super::CURRENT_API_VERSION )) ); } - // [utest->swdd~common-workload-naming-convention~1] #[test] - fn utest_state_rejects_incompatible_state_on_workload_name() { - let workload_name = "nginx.test".to_string(); - let state_incompatible_version = State { - api_version: "v0.1".to_string(), - workloads: HashMap::from([(workload_name.clone(), StoredWorkloadSpec::default())]), - ..Default::default() + fn utest_state_rejects_state_without_api_version() { + let state_proto_no_version = ank_base::State { + api_version: "".into(), + workloads: Some(ank_base::WorkloadMap { + workloads: HashMap::new(), + }), + configs: Some(ank_base::ConfigMap { + configs: HashMap::new(), + }), }; - assert_eq!( - State::verify_format(&state_incompatible_version), - Err(format!( - "Unsupported workload name. 
Received '{}', expected to have characters in {}", - workload_name, STR_RE_WORKLOAD - )) - ); + let state_ankaios_no_version = State::try_from(state_proto_no_version).unwrap(); + + assert_eq!(state_ankaios_no_version.api_version, "".to_string()); + + let file_without_api_version = ""; + let deserialization_result = serde_yaml::from_str::(file_without_api_version) + .unwrap_err() + .to_string(); + assert_eq!(deserialization_result, "missing field `apiVersion`"); } - // [utest->swdd~common-workload-naming-convention~1] + // [utest->swdd~common-config-item-key-naming-convention~1] #[test] - fn utest_state_rejects_incompatible_state_on_inordinately_long_workload_name() { - let workload_name = "workload_name_is_too_long_for_ankaios_to_accept_it_and_I_don_t_know_what_else_to_write".to_string(); - let state_incompatible_version = State { - api_version: "v0.1".to_string(), - workloads: HashMap::from([(workload_name.clone(), StoredWorkloadSpec::default())]), - ..Default::default() + fn utest_verify_configs_format_compatible_config_item_keys_and_config_references() { + let workload = generate_test_stored_workload_spec(AGENT_A, RUNTIME); + let state = State { + api_version: super::CURRENT_API_VERSION.into(), + workloads: HashMap::from([(WORKLOAD_NAME_1.to_string(), workload)]), + configs: generate_test_configs(), }; - assert_eq!( - State::verify_format(&state_incompatible_version), - Err(format!( - "Workload name length {} exceeds the maximum limit of {} characters", - workload_name.len(), - MAX_CHARACTERS_WORKLOAD_NAME - )) - ); + + assert_eq!(State::verify_configs_format(&state), Ok(())); } - // [utest->swdd~common-agent-naming-convention~1] + // [utest->swdd~common-config-item-key-naming-convention~1] #[test] - fn utest_state_rejects_incompatible_state_on_agent_name() { - let agent_name = "agent_A.test".to_string(); - let state_incompatible_version = State { - api_version: "v0.1".to_string(), - workloads: HashMap::from([( - "sample".to_string(), - StoredWorkloadSpec { - agent: agent_name.clone(), - ..Default::default() - }, + fn utest_verify_configs_format_incompatible_config_item_key() { + let state = State { + api_version: super::CURRENT_API_VERSION.into(), + configs: HashMap::from([( + INVALID_CONFIG_KEY.to_owned(), + ConfigItem::String("value".to_string()), )]), ..Default::default() }; + assert_eq!( - State::verify_format(&state_incompatible_version), + State::verify_configs_format(&state), Err(format!( - "Unsupported agent name. Received '{}', expected to have characters in {}", - agent_name, STR_RE_AGENT + "Unsupported config item key. 
Received '{}', expected to have characters in {}", + INVALID_CONFIG_KEY, + super::STR_RE_CONFIG_REFERENCES )) ); } + // [utest->swdd~common-config-aliases-and-config-reference-keys-naming-convention~1] #[test] - fn utest_state_rejects_state_without_api_version() { - let state_proto_no_version = ank_base::State { - api_version: "".into(), - workloads: Some(ank_base::WorkloadMap { - workloads: HashMap::new(), - }), - configs: Some(ank_base::ConfigMap { - configs: HashMap::new(), - }), + fn utest_verify_configs_format_incompatible_workload_config_alias() { + let mut workload = generate_test_stored_workload_spec(AGENT_A, RUNTIME); + workload + .configs + .insert(INVALID_CONFIG_KEY.to_owned(), "config_1".to_string()); + + let state = State { + api_version: super::CURRENT_API_VERSION.into(), + workloads: HashMap::from([(WORKLOAD_NAME_1.to_string(), workload)]), + ..Default::default() }; - let state_ankaios_no_version = State::try_from(state_proto_no_version).unwrap(); - - assert_eq!(state_ankaios_no_version.api_version, "".to_string()); - let file_without_api_version = ""; - let deserialization_result = serde_yaml::from_str::(file_without_api_version) - .unwrap_err() - .to_string(); - assert_eq!(deserialization_result, "missing field `apiVersion`"); + assert_eq!( + State::verify_configs_format(&state), + Err(format!( + "Unsupported config alias. Received '{}', expected to have characters in {}", + INVALID_CONFIG_KEY, + super::STR_RE_CONFIG_REFERENCES + )) + ); } } diff --git a/common/src/objects/stored_workload_spec.rs b/common/src/objects/stored_workload_spec.rs index db41d44b5..c48cb8803 100644 --- a/common/src/objects/stored_workload_spec.rs +++ b/common/src/objects/stored_workload_spec.rs @@ -12,6 +12,7 @@ // // SPDX-License-Identifier: Apache-2.0 +use regex::Regex; use std::collections::HashMap; use api::ank_base; @@ -24,6 +25,8 @@ use super::{ WorkloadInstanceName, WorkloadSpec, }; +pub const STR_RE_CONFIG_REFERENCES: &str = r"^[a-zA-Z0-9_-]*$"; + #[derive(Debug, Serialize, Default, Deserialize, Clone, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct StoredWorkloadSpec { @@ -42,6 +45,31 @@ pub struct StoredWorkloadSpec { pub configs: HashMap, } +impl StoredWorkloadSpec { + // [impl->swdd~common-config-aliases-and-config-reference-keys-naming-convention~1] + pub fn verify_config_reference_format( + config_references: &HashMap, + ) -> Result<(), String> { + let re_config_references = Regex::new(STR_RE_CONFIG_REFERENCES).unwrap(); + for (config_alias, referenced_config) in config_references { + if !re_config_references.is_match(config_alias) { + return Err(format!( + "Unsupported config alias. Received '{}', expected to have characters in {}", + config_alias, STR_RE_CONFIG_REFERENCES + )); + } + + if !re_config_references.is_match(referenced_config) { + return Err(format!( + "Unsupported config reference key. 
Received '{}', expected to have characters in {}", + referenced_config, STR_RE_CONFIG_REFERENCES + )); + } + } + Ok(()) + } +} + impl TryFrom for StoredWorkloadSpec { type Error = String; @@ -149,8 +177,8 @@ pub fn generate_test_stored_workload_spec_with_config( StoredWorkloadSpec { agent: agent.into(), dependencies: HashMap::from([ - (String::from("workload A"), AddCondition::AddCondRunning), - (String::from("workload C"), AddCondition::AddCondSucceeded), + (String::from("workload_A"), AddCondition::AddCondRunning), + (String::from("workload_C"), AddCondition::AddCondSucceeded), ]), restart_policy: RestartPolicy::Always, runtime: runtime_name.into(), @@ -183,4 +211,27 @@ pub fn generate_test_stored_workload_spec( // [utest->swdd~common-object-serialization~1] #[cfg(test)] -mod tests {} +mod tests { + use super::StoredWorkloadSpec; + use std::collections::HashMap; + + // one test for a failing case, other cases are tested on the caller side to not repeat test code + // [utest->swdd~common-config-aliases-and-config-reference-keys-naming-convention~1] + #[test] + fn utest_verify_config_reference_format_invalid_config_reference_key() { + let invalid_config_reference_key = "invalid%key"; + let mut configs = HashMap::new(); + configs.insert( + "config_alias_1".to_owned(), + invalid_config_reference_key.to_owned(), + ); + assert_eq!( + StoredWorkloadSpec::verify_config_reference_format(&configs), + Err(format!( + "Unsupported config reference key. Received '{}', expected to have characters in {}", + invalid_config_reference_key, + super::STR_RE_CONFIG_REFERENCES + )) + ); + } +} diff --git a/common/src/objects/workload_spec.rs b/common/src/objects/workload_spec.rs index afa3652ab..b3e5ac775 100644 --- a/common/src/objects/workload_spec.rs +++ b/common/src/objects/workload_spec.rs @@ -12,6 +12,7 @@ // // SPDX-License-Identifier: Apache-2.0 +use regex::Regex; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -24,6 +25,11 @@ use super::WorkloadInstanceName; pub type WorkloadCollection = Vec; pub type DeletedWorkloadCollection = Vec; + +const MAX_CHARACTERS_WORKLOAD_NAME: usize = 63; +pub const STR_RE_WORKLOAD: &str = r"^[a-zA-Z0-9_-]+*$"; +pub const STR_RE_AGENT: &str = r"^[a-zA-Z0-9_-]*$"; + // [impl->swdd~common-object-serialization~1] #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] pub struct DeletedWorkload { @@ -51,6 +57,48 @@ impl WorkloadSpec { pub fn needs_control_interface(&self) -> bool { !self.control_interface_access.allow_rules.is_empty() } + + // [impl->swdd~common-workload-naming-convention~1] + // [impl->swdd~common-agent-naming-convention~1] + pub fn verify_fields_format(workload_spec: &WorkloadSpec) -> Result<(), String> { + Self::verify_workload_name_format(workload_spec.instance_name.workload_name())?; + Self::verify_agent_name_format(workload_spec.instance_name.agent_name())?; + Ok(()) + } + + // [impl->swdd~common-workload-naming-convention~1] + fn verify_workload_name_format(workload_name: &str) -> Result<(), String> { + let re_workloads = Regex::new(STR_RE_WORKLOAD).unwrap(); + if !re_workloads.is_match(workload_name) { + return Err(format!( + "Unsupported workload name. 
Received '{}', expected to have characters in {}", + workload_name, STR_RE_WORKLOAD + )); + } + + if workload_name.len() > MAX_CHARACTERS_WORKLOAD_NAME { + Err(format!( + "Workload name length {} exceeds the maximum limit of {} characters", + workload_name.len(), + MAX_CHARACTERS_WORKLOAD_NAME + )) + } else { + Ok(()) + } + } + + // [impl->swdd~common-agent-naming-convention~1] + fn verify_agent_name_format(agent_name: &str) -> Result<(), String> { + let re_agent = Regex::new(STR_RE_AGENT).unwrap(); + if !re_agent.is_match(agent_name) { + Err(format!( + "Unsupported agent name. Received '{}', expected to have characters in {}", + agent_name, STR_RE_AGENT + )) + } else { + Ok(()) + } + } } pub type AgentWorkloadMap = HashMap; @@ -216,8 +264,8 @@ use crate::objects::generate_test_control_interface_access; #[cfg(any(feature = "test_utils", test))] fn generate_test_dependencies() -> HashMap { HashMap::from([ - (String::from("workload A"), AddCondition::AddCondRunning), - (String::from("workload C"), AddCondition::AddCondSucceeded), + (String::from("workload_A"), AddCondition::AddCondRunning), + (String::from("workload_C"), AddCondition::AddCondSucceeded), ]) } @@ -314,9 +362,9 @@ pub fn generate_test_workload_spec_with_dependencies( // [utest->swdd~common-object-serialization~1] #[cfg(test)] mod tests { - use crate::objects::*; use crate::test_utils::*; + const RUNTIME: &str = "runtime"; #[test] fn utest_get_workloads_per_agent_one_agent_one_workload() { @@ -434,14 +482,14 @@ mod tests { generate_test_deleted_workload("agent X".to_string(), "workload X".to_string()); deleted_workload.dependencies.insert( - "workload C".to_string(), + "workload_C".to_string(), DeleteCondition::DelCondNotPendingNorRunning, ); let serialized_deleted_workload = serde_yaml::to_string(&deleted_workload).unwrap(); let indices = [ - serialized_deleted_workload.find("workload A").unwrap(), - serialized_deleted_workload.find("workload C").unwrap(), + serialized_deleted_workload.find("workload_A").unwrap(), + serialized_deleted_workload.find("workload_C").unwrap(), ]; assert!( indices.windows(2).all(|window| window[0] < window[1]), @@ -509,4 +557,67 @@ mod tests { workload_spec.control_interface_access = generate_test_control_interface_access(); assert!(workload_spec.needs_control_interface()); } + + // [utest->swdd~common-workload-naming-convention~1] + // [utest->swdd~common-agent-naming-convention~1] + #[test] + fn utest_workload_verify_fields_format_success() { + let compatible_workload_spec = generate_test_workload_spec(); + assert_eq!( + WorkloadSpec::verify_fields_format(&compatible_workload_spec), + Ok(()) + ); + } + + // [utest->swdd~common-workload-naming-convention~1] + #[test] + fn utest_workload_verify_fields_incompatible_workload_name() { + let spec_with_wrong_workload_name = generate_test_workload_spec_with_param( + "agent_A".to_owned(), + "incompatible.workload_name".to_owned(), + RUNTIME.to_owned(), + ); + + assert_eq!( + WorkloadSpec::verify_fields_format(&spec_with_wrong_workload_name), + Err(format!( + "Unsupported workload name. 
Received '{}', expected to have characters in {}", + spec_with_wrong_workload_name.instance_name.workload_name(), + super::STR_RE_WORKLOAD + )) + ); + } + + // [utest->swdd~common-agent-naming-convention~1] + #[test] + fn utest_workload_verify_fields_incompatible_agent_name() { + let spec_with_wrong_agent_name = generate_test_workload_spec_with_param( + "incompatible.agent_name".to_owned(), + "workload_1".to_owned(), + RUNTIME.to_owned(), + ); + + assert_eq!( + WorkloadSpec::verify_fields_format(&spec_with_wrong_agent_name), + Err(format!( + "Unsupported agent name. Received '{}', expected to have characters in {}", + spec_with_wrong_agent_name.instance_name.agent_name(), + super::STR_RE_AGENT + )) + ); + } + + // [utest->swdd~common-workload-naming-convention~1] + #[test] + fn utest_verify_workload_name_format_inordinately_long_workload_name() { + let workload_name = "workload_name_is_too_long_for_ankaios_to_accept_it_and_I_don_t_know_what_else_to_write".to_string(); + assert_eq!( + WorkloadSpec::verify_workload_name_format(&workload_name), + Err(format!( + "Workload name length {} exceeds the maximum limit of {} characters", + workload_name.len(), + super::MAX_CHARACTERS_WORKLOAD_NAME, + )) + ); + } } diff --git a/common/src/state_manipulation/object.rs b/common/src/state_manipulation/object.rs index f4444024b..4137c849e 100644 --- a/common/src/state_manipulation/object.rs +++ b/common/src/state_manipulation/object.rs @@ -764,8 +764,8 @@ mod tests { .entry( "dependencies", Mapping::default() - .entry("workload A", "ADD_COND_RUNNING") - .entry("workload C", "ADD_COND_SUCCEEDED"), + .entry("workload_A", "ADD_COND_RUNNING") + .entry("workload_C", "ADD_COND_SUCCEEDED"), ) .entry("restartPolicy", "ALWAYS") .entry("runtime", "runtime") diff --git a/common/src/test_utils.rs b/common/src/test_utils.rs index 8875f98a2..a86fe4372 100644 --- a/common/src/test_utils.rs +++ b/common/src/test_utils.rs @@ -143,11 +143,11 @@ fn generate_test_proto_dependencies() -> Dependencies { Dependencies { dependencies: (HashMap::from([ ( - String::from("workload A"), + String::from("workload_A"), ank_base::AddCondition::AddCondRunning.into(), ), ( - String::from("workload C"), + String::from("workload_C"), ank_base::AddCondition::AddCondSucceeded.into(), ), ])), @@ -156,7 +156,7 @@ fn generate_test_proto_dependencies() -> Dependencies { fn generate_test_delete_dependencies() -> HashMap { HashMap::from([( - String::from("workload A"), + String::from("workload_A"), DeleteCondition::DelCondNotPendingNorRunning, )]) } diff --git a/doc/docs/reference/complete-state.md b/doc/docs/reference/complete-state.md index 4aea8c0e7..90a14a934 100644 --- a/doc/docs/reference/complete-state.md +++ b/doc/docs/reference/complete-state.md @@ -41,6 +41,7 @@ desiredState: command: - echo - "Hello from a container in a pod" + configs: {} hello1: agent: agent_B tags: @@ -52,6 +53,7 @@ desiredState: image: alpine:latest commandOptions: [ "--rm"] commandArgs: [ "echo", "Hello Ankaios"] + configs: {} hello2: agent: agent_B tags: @@ -64,6 +66,7 @@ desiredState: image: alpine:latest commandOptions: [ "--entrypoint", "/bin/sh" ] commandArgs: [ "-c", "echo 'Always restarted.'; sleep 2"] + configs: {} nginx: agent: agent_A tags: @@ -75,6 +78,8 @@ desiredState: runtimeConfig: | image: docker.io/nginx:latest commandOptions: ["-p", "8081:80"] + configs: {} + configs: {} workloadStates: [] agents: {} ``` @@ -90,10 +95,10 @@ desiredState: !!! 
Note
- In case of workload names, the naming convention states that thier names shall:
- * contain only regular upper and lowercase characters (a-z and A-Z), numbers and the symbols "-" and "_"
- * have a minimal length of 1 character
- * have a maximal length of 63 characters
+ In case of workload names, the naming convention states that their names shall:
+ - contain only regular upper and lowercase characters (a-z and A-Z), numbers and the symbols "-" and "_"
+ - have a minimal length of 1 character
+ - have a maximal length of 63 characters
Also, agent name shall contain only regular upper and lowercase characters (a-z and A-Z), numbers and the symbols "-" and "_". ## Object field mask @@ -107,7 +112,7 @@ The object field mask can be constructed using the field names of the [CompleteS 1. Example: `ank -k get state desiredState.workloads.nginx` returns only the information about nginx workload: - ```yaml + ```yaml desiredState: apiVersion: v0.1 workloads: @@ -122,25 +127,26 @@ The object field mask can be constructed using the field names of the [CompleteS runtimeConfig: | image: docker.io/nginx:latest commandOptions: ["-p", "8081:80"] - ``` + configs: {} + ``` 2. Example `ank -k get state desiredState.workloads.nginx.runtimeConfig` returns only the runtime configuration of nginx workload: - ```yaml - desiredState: - apiVersion: v0.1 - workloads: - nginx: - runtimeConfig: | - image: docker.io/nginx:latest - commandOptions: ["-p", "8081:80"] - ``` + ```yaml + desiredState: + apiVersion: v0.1 + workloads: + nginx: + runtimeConfig: | + image: docker.io/nginx:latest + commandOptions: ["-p", "8081:80"] + ``` 3. Example `ank -k set state desiredState.workloads.nginx.restartPolicy new-state.yaml` changes the restart behavior of nginx workload to `NEVER`: - ```yaml title="new-state.yaml" - desiredState: - workloads: - nginx: - restartPolicy: NEVER - ``` + ```yaml title="new-state.yaml" + desiredState: + workloads: + nginx: + restartPolicy: NEVER + ``` diff --git a/doc/docs/reference/startup-configuration.md b/doc/docs/reference/startup-configuration.md index 1f4415fe8..c31bc1ca5 100644 --- a/doc/docs/reference/startup-configuration.md +++ b/doc/docs/reference/startup-configuration.md @@ -13,10 +13,11 @@ A workload specification must contain the following information: * `workload name`_(via field key)_, specify the workload name to identify the workload in the Ankaios system. * `runtime`, specify the type of the runtime. Currently supported values are `podman` and `podman-kube`. -* `agent`, specify the name of the owning agent which is going to execute the workload. +* `agent`, specify the name of the owning agent which is going to execute the workload. Supports templated strings. * `restartPolicy`, specify how the workload should be restarted upon exiting (not implemented yet). * `tags`, specify a list of `key` `value` pairs. -* `runtimeConfig`, specify as a _string_ the configuration for the [runtime](./glossary.md#runtime) whose configuration structure is specific for each runtime, e.g., for `podman` runtime the [PodmanRuntimeConfig](#podmanruntimeconfig) is used. +* `runtimeConfig`, specify as a _string_ the configuration for the [runtime](./glossary.md#runtime) whose configuration structure is specific for each runtime, e.g., for `podman` runtime the [PodmanRuntimeConfig](#podmanruntimeconfig) is used. Supports templated strings. +* `configs`: assign configuration items defined in the state's `configs` field to the workload * `controlInterfaceAccess`, specify the access rights of the workload for the control interface. 
Example `startup-config.yaml` file: @@ -31,17 +32,32 @@ workloads: tags: - key: owner value: Ankaios team + configs: + port: web_server_port runtimeConfig: | image: docker.io/nginx:latest - commandOptions: ["-p", "8081:80"] + commandOptions: ["-p", "{{port.access_port}}:80"] controlInterfaceAccess: allowRules: - type: StateRule operation: Read filterMask: - "workloadStates" +configs: + web_server_port: + access_port: "8081" ``` +Ankaios supports templated strings and [essential control directives](https://github.com/sunng87/handlebars-rust/tree/v6.1.0?tab=readme-ov-file#limited-but-essential-control-structures-built-in) in the handlebars templating language for the following workload fields: + +* `agent` +* `runtimeConfig` + +Ankaios renders a templated state at startup or when the state is updated. The rendering replaces the templated strings with the configuration items associated with each workload. The configuration items themselves are defined in a `configs` field, which contains several key-value pairs. The key specifies the name of the configuration item and the value is a string, list or associative data structure. To see templated workload configurations in action, follow the [tutorial about sending and receiving vehicle data](../usage/tutorial-vehicle-signals.md#define-re-usable-configuration). + +!!! Note + The name of a configuration item can only contain regular characters, digits, the "-" and "_" symbols. The same applies to the keys and values of the workload's `configs` field when assigning configuration items to a workload. + ### PodmanRuntimeConfig The runtime configuration for the `podman` runtime is specified as follows: diff --git a/doc/docs/usage/quickstart.md b/doc/docs/usage/quickstart.md index 128a35100..d51f245d4 100644 --- a/doc/docs/usage/quickstart.md +++ b/doc/docs/usage/quickstart.md @@ -72,17 +72,19 @@ desiredState: runtimeConfig: | image: docker.io/nginx:latest commandOptions: ["-p", "8081:80"] + configs: {} + configs: {} workloadStates: -- instanceName: - agentName: agent_A - workloadName: nginx - id: 7d6ea2b79cea1e401beee1553a9d3d7b5bcbb37f1cfdb60db1fbbcaa140eb17d - executionState: - state: Running - subState: Ok - additionalInfo: '' + agent_A: + nginx: + cc74dd34189ef3181a2f15c6c5f5b0e76aaefbcd55397e15314e7a25bad0864b: + state: Running + subState: Ok + additionalInfo: '' agents: - agent_A: {} + agent_A: + cpuUsage: 2 + freeMemory: 7989682176 ``` or diff --git a/doc/docs/usage/tutorial-vehicle-signals.md b/doc/docs/usage/tutorial-vehicle-signals.md index bf6e20255..6989748a7 100644 --- a/doc/docs/usage/tutorial-vehicle-signals.md +++ b/doc/docs/usage/tutorial-vehicle-signals.md @@ -340,3 +340,127 @@ dependencies: ``` The next time the Ankaios server and the two agents will be started, this startup config will be applied. + +## Define re-usable configuration + +Let's improve the previous startup manifest by introducing a templated configuration for workloads to avoid configuration repetition and have a single point of change. The supported fields and syntax are described [here](../reference/startup-configuration.md). + +```yaml title="/etc/ankaios/state.yaml" hl_lines="5-8 12 15 18-21 25-28 34-36 40-54" +apiVersion: v0.1 +workloads: + databroker: + runtime: podman + agent: "{{agent.name}}" # (1)! + configs: + agent: agents # (2)! + network: network # (3)! + runtimeConfig: | # (4)! 
+ image: ghcr.io/eclipse/kuksa.val/databroker:0.4.1 + commandArgs: ["--insecure"] + commandOptions: ["--net={{network}}"] + speed-provider: + runtime: podman + agent: "{{agent.name}}" + dependencies: + databroker: ADD_COND_RUNNING + configs: + agent: agents + net: network + env: env_provider # (5)! + runtimeConfig: | # (6)! + image: ghcr.io/eclipse-ankaios/speed-provider:0.1.1 + commandOptions: + - "--net={{net}}" + {{#each env}} + - "-e {{this.key}}={{this.value}}" + {{/each}} + speed-consumer: + runtime: podman + agent: infotainment + dependencies: + databroker: ADD_COND_RUNNING + configs: + network: network + env: env_consumer # (7)! + runtimeConfig: | # (8)! + image: ghcr.io/eclipse-ankaios/speed-consumer:0.1.2 + commandOptions: + - "--net={{network}}" + {{#each env}} + - "-e {{this.key}}={{this.value}}" + {{/each}} +configs: # (9)! + network: host + env_provider: + - key: SPEED_PROVIDER_MODE + value: auto + env_consumer: + - key: KUKSA_DATA_BROKER_ADDR + value: "127.0.0.1" + agents: + name: agent_A +``` + +1. The agent name is templated and rendered with the configuration value that the 'agent' alias refers to, which is 'agent_A'. +2. The configuration item 'agents' is assigned to the workload with alias 'agent'. +3. The configuration item 'network' is assigned to the workload with alias 'network'. +4. The runtimeConfig contains a template string accessing the assigned network configuration item. It is rendered with the configuration value that the 'network' alias refers to, which is 'host'. +5. The configuration item 'env_provider' is assigned to the workload with alias 'env'. +6. In addition to the templated string for the network, the runtimeConfig contains a templated loop to assign all environment variables that the 'env' alias refers to, which is 'SPEED_PROVIDER_MODE' with value 'auto'. +7. The configuration item 'env_consumer' is assigned to the workload with alias 'env'. +8. In addition to the templated string for the network, the runtimeConfig contains a templated loop to assign all environment variables that the 'env' alias refers to, which is 'KUKSA_DATA_BROKER_ADDR' with value '127.0.0.1'. +9. The configuration items are defined as key-value pairs. + +Start the Ankaios cluster again, by executing the following command: + +```shell +sudo systemctl start ank-server +sudo systemctl start ank-agent +``` + +Start the `infotainment` agent, remembering to change the server URL if the agent is not running on the same host: + +```shell +ank-agent -k --name infotainment --server-url http://127.0.0.1:25551 +``` + +Verify again that all workloads are up and running. + +### Update configuration items + +Let's update the content of a configuration item with the `ank apply` command. + +Using `ank apply`: + +```yaml title="new-manifest.yaml" +apiVersion: v0.1 +configs: + env_provider: + - key: SPEED_PROVIDER_MODE + value: webui +``` + +```shell +ank -k apply new-manifest.yaml +``` + +Ankaios will update workloads that reference an updated configuration item. +After running one of these commands, the `speed-provider` workload has been updated to run in the 'webui' mode. + +You can verify this by re-opening the web UI on . + +### Delete configuration items + +Let's try to delete a configuration item still referenced by workloads in its `configs` field by re-using the previous manifest content. + +```shell +ank -k apply -d new-manifest.yaml +``` + +The command returns an error that the rendering of the new state fails due to a missing configuration item. 
+ +Ankaios will always reject a new state if it fails to render. The `speed-provider` still references the configuration item in its `configs` field which would no longer exist. + +Running the `ank -k get state` command afterwards will show that Ankaios still has the previous state in memory. + +To remove configuration items, remove the configuration references for the desired configuration items in the workload's `configs` field, and remove the desired configuration items from the state. This can be done in a single step. diff --git a/examples/rust_control_interface/src/main.rs b/examples/rust_control_interface/src/main.rs index 3c2f38077..f99e5561c 100644 --- a/examples/rust_control_interface/src/main.rs +++ b/examples/rust_control_interface/src/main.rs @@ -18,7 +18,7 @@ use api::ank_base::{ }; use api::control_api::{ - from_ankaios::FromAnkaiosEnum, to_ankaios::ToAnkaiosEnum, FromAnkaios, ToAnkaios, Hello + from_ankaios::FromAnkaiosEnum, to_ankaios::ToAnkaiosEnum, FromAnkaios, Hello, ToAnkaios, }; use prost::Message; @@ -50,7 +50,9 @@ mod logging { /// Create a Hello message to initialize the session fn create_hello_message() -> ToAnkaios { ToAnkaios { - to_ankaios_enum: Some(ToAnkaiosEnum::Hello(Hello{ protocol_version: env!("ANKAIOS_VERSION").to_string() })), + to_ankaios_enum: Some(ToAnkaiosEnum::Hello(Hello { + protocol_version: env!("ANKAIOS_VERSION").to_string(), + })), } } @@ -78,6 +80,7 @@ fn create_request_to_add_new_workload() -> ToAnkaios { dependencies: Some(Dependencies { dependencies: HashMap::new(), }), + configs: None, control_interface_access: None, }, )]), @@ -86,16 +89,19 @@ fn create_request_to_add_new_workload() -> ToAnkaios { ToAnkaios { to_ankaios_enum: Some(ToAnkaiosEnum::Request(Request { request_id: REQUEST_ID.to_string(), - request_content: Some(RequestContent::UpdateStateRequest(UpdateStateRequest { - new_state: Some(CompleteState { - desired_state: Some(State { - api_version: "v0.1".into(), - workloads: new_workloads, + request_content: Some(RequestContent::UpdateStateRequest(Box::new( + UpdateStateRequest { + new_state: Some(CompleteState { + desired_state: Some(State { + api_version: "v0.1".into(), + workloads: new_workloads, + ..Default::default() + }), + ..Default::default() }), - ..Default::default() - }), - update_mask: vec!["desiredState.workloads.dynamic_nginx".to_string()], - })), + update_mask: vec!["desiredState.workloads.dynamic_nginx".to_string()], + }, + ))), })), } } @@ -199,7 +205,13 @@ fn write_to_control_interface() { }); let protobuf_hello_message = create_hello_message(); - logging::log(format!("Sending initial Hello message:\n{:#?}", protobuf_hello_message).as_str()); + logging::log( + format!( + "Sending initial Hello message:\n{:#?}", + protobuf_hello_message + ) + .as_str(), + ); sc_req .write_all(&protobuf_hello_message.encode_length_delimited_to_vec()) .unwrap(); diff --git a/grpc/src/from_server_proxy.rs b/grpc/src/from_server_proxy.rs index 95bdf171b..634fcd39b 100644 --- a/grpc/src/from_server_proxy.rs +++ b/grpc/src/from_server_proxy.rs @@ -490,7 +490,7 @@ mod tests { *workload .dependencies - .get_mut(&String::from("workload A")) + .get_mut(&String::from("workload_A")) .unwrap() = -1; // simulate the reception of an update workload grpc from server message diff --git a/grpc/src/grpc_api.rs b/grpc/src/grpc_api.rs index 55475950a..230741403 100644 --- a/grpc/src/grpc_api.rs +++ b/grpc/src/grpc_api.rs @@ -247,7 +247,7 @@ impl TryFrom for to_server_interface::ToServer { #[cfg(test)] fn generate_test_proto_delete_dependencies() -> 
HashMap { HashMap::from([( - String::from("workload A"), + String::from("workload_A"), DeleteCondition::DelCondNotPendingNorRunning.into(), )]) } @@ -571,7 +571,7 @@ mod tests { #[test] fn utest_converts_to_ankaios_deleted_workload_fails() { let mut proto_workload = generate_test_proto_deleted_workload(); - proto_workload.dependencies.insert("workload B".into(), -1); + proto_workload.dependencies.insert("workload_B".into(), -1); assert!(ankaios::DeletedWorkload::try_from(proto_workload).is_err()); } @@ -588,11 +588,11 @@ mod tests { }), dependencies: HashMap::from([ ( - String::from("workload A"), + String::from("workload_A"), ank_base::AddCondition::AddCondRunning.into(), ), ( - String::from("workload C"), + String::from("workload_C"), ank_base::AddCondition::AddCondSucceeded.into(), ), ]), @@ -614,11 +614,11 @@ mod tests { let ank_workload = ankaios::WorkloadSpec { dependencies: HashMap::from([ ( - String::from("workload A"), + String::from("workload_A"), ankaios::AddCondition::AddCondRunning, ), ( - String::from("workload C"), + String::from("workload_C"), ankaios::AddCondition::AddCondSucceeded, ), ]), @@ -641,11 +641,11 @@ mod tests { }), dependencies: HashMap::from([ ( - String::from("workload A"), + String::from("workload_A"), ank_base::AddCondition::AddCondRunning.into(), ), ( - String::from("workload C"), + String::from("workload_C"), ank_base::AddCondition::AddCondSucceeded.into(), ), ]), @@ -671,12 +671,12 @@ mod tests { }), dependencies: HashMap::from([ ( - String::from("workload A"), + String::from("workload_A"), ank_base::AddCondition::AddCondRunning.into(), ), - (String::from("workload B"), -1), + (String::from("workload_B"), -1), ( - String::from("workload C"), + String::from("workload_C"), ank_base::AddCondition::AddCondSucceeded.into(), ), ]), diff --git a/grpc/src/to_server_proxy.rs b/grpc/src/to_server_proxy.rs index 9ac28e7ca..a8af63911 100644 --- a/grpc/src/to_server_proxy.rs +++ b/grpc/src/to_server_proxy.rs @@ -496,7 +496,7 @@ mod tests { .as_mut() .unwrap() .dependencies - .get_mut(&String::from("workload A")) + .get_mut(&String::from("workload_A")) .unwrap() = -1; let ankaios_update_mask = vec!["bla".into()]; diff --git a/server/Cargo.toml b/server/Cargo.toml index 5d113d7da..268a71365 100644 --- a/server/Cargo.toml +++ b/server/Cargo.toml @@ -29,6 +29,7 @@ async-stream = "0.3" serde = { version = "1.0", features = ["derive"] } serde_yaml = "0.9" clap = { version = "4.5", features = ["derive", "env"] } +handlebars = "6.1.0" [dev-dependencies] common = { path = "../common", features = ["test_utils"] } diff --git a/server/doc/swdesign/README.md b/server/doc/swdesign/README.md index 6bee37004..37a2c76fd 100644 --- a/server/doc/swdesign/README.md +++ b/server/doc/swdesign/README.md @@ -24,6 +24,31 @@ The following diagram shows a high level view of an Ankaios Server in its contex ### Design decisions +The following section holds the design decisions taken during the development of the Ankaios server. + +#### Delegate template rendering of workload configs to the handlebars external library +`swdd~server-delegate-template-render-to-external-library~1` + +Status: approved + +Ankaios uses the handlebars crate to render the configs provided for a workload. + +Rationale: + +The handlebars crate provides all the functionality needed to render the configs with templates, reducing implementation and integration effort. It is actively maintained and widely deployed. 
It is not overloaded with features, instead it supports the minimum amount of features needed to cover the use cases of workload configs. In addition, its rendering capabilities are extensible if a desired feature is missing in the future. + +Needs: +- impl + +Assumptions: + +No assumptions were taken. + +Considered alternatives: + +- Askama: does not support rendering templates at runtime, mainly used for generating code based on templates +- Tera: Jinja2 template engine contains too many features beyond the use case + ## Structural view The following diagram shows the structural view of the Ankaios Server: @@ -53,6 +78,10 @@ The Communication Middleware is responsible for: The ServerState is a data structure for maintaining the state of the Ankaios server. It prevents invariants when updating the state, by doing checks on the new state before applying it or when a view on the state is requested. +### ConfigRenderer + +The ConfigRenderer is responsible for rendering the templated configuration of workloads with their corresponding configuration items provided inside the CompleteState. + ## Behavioral view ### Startup sequence @@ -323,7 +352,10 @@ Needs: Status: approved -The Ankaios Server shall select the Agent responsible for running the Workload based on the `agent` field. +The Ankaios Server shall select the workloads targeted at an agent based on the `agent` field. + +Comment: +The field contents of the workloads are already rendered. Tags: - AnkaiosServer @@ -466,7 +498,7 @@ The CompleteState includes: - Agents Comment: -The field `Agents` is an associative data structure with the name of a connected agent as key and an associative data structure as value to store attributes of the agent by key/value pairs. +The field `Agents` is an associative data structure with the name of a connected agent as key and an associative data structure as value to store attributes of the agent by key/value pairs. If the DesiredState contains fields with templated strings, it is returned unrendered. Tags: - AnkaiosServer @@ -485,6 +517,9 @@ Status: approved When the Ankaios Server responses to a GetCompleteState request and the request contains a `field_mask`, the response includes the filed `api_version` and the fields listed in the `field_mask`. +Comment: +If the fields listed in the `field_mask` contain templated strings, they are returned unrendered. + Tags: - ControlInterface @@ -627,16 +662,16 @@ Needs: - impl - stest -### Update Current State +### Update Desired State -The behavioral diagram of the updating current state is shown in the chapter "UpdateState interface". +The behavioral diagram of updating the desired state is shown in the chapter "UpdateState interface". #### Server detects new workload `swdd~server-detects-new-workload~1` Status: approved -When the Ankaios Server gets the `ToServer` message `UpdateState` and detects a change of the state where a workload is present only in the New State, +When the Ankaios Server gets the `ToServer` message `UpdateStateRequest` and detects a change of the state where a workload is present only in the New State, the Ankaios Server shall send a `FromServer` message to the corresponding Ankaios Agent to add the workload. 
Tags: @@ -652,7 +687,7 @@ Needs: Status: approved -When the Ankaios Server gets the `ToServer` message `UpdateState` and detects a change of the state where a workload is present only in the Current State, +When the Ankaios Server gets the `ToServer` message `UpdateStateRequest` and detects a change of the state where a workload is present only in the Current State, the Ankaios Server shall send a `FromServer` message to the corresponding Ankaios Agent to delete the workload. Tags: @@ -668,7 +703,7 @@ Needs: Status: approved -When the Ankaios Server gets the `ToServer` message `UpdateState` and detects a change of the state where a workload is present in both states +When the Ankaios Server gets the `ToServer` message `UpdateStateRequest` and detects a change of the state where a workload is present in both states and at least one field of the workload is different, the Ankaios Server shall send a `FromServer` message to the corresponding Ankaios Agents to delete and add the workload. @@ -680,6 +715,96 @@ Needs: - utest - itest +#### ServerState compares rendered workload configurations +`swdd~server-state-compares-rendered-workloads~1` + +Status: approved + +When the ServerState determines changes in its State, the ServerState shall compare the rendered workload configurations of its current and new DesiredState. + +Rationale: +This ensures that the system recognizes a workload as changed when a configuration item referenced by that workload is updated. + +Tags: +- ServerState + +Needs: +- impl +- utest +- stest + +#### ServerState updates its desired state on unmodified workloads +`swdd~server-state-updates-state-on-unmodified-workloads~1` + +Status: approved + +When the ServerState is requested to update its State and the ServerState detects no change of workloads in its State, the ServerState shall replace its current DesiredState with the new DesiredState. + +Rationale: +The DesiredState must also be updated in other cases, such as when the config items are changed. + +Tags: +- ServerState + +Needs: +- impl +- utest + +#### ServerState triggers configuration rendering of workloads +`swdd~server-state-triggers-configuration-rendering-of-workloads~1` + +Status: approved + +When the ServerState is requested to update its State, the ServerState shall trigger the ConfigRenderer to render the workloads with the configuration items in the CompleteState. + +Rationale: +Rendering consumes resources and shall be done only once when updating the state. + +Tags: +- ServerState +- ConfigRenderer + +Needs: +- impl +- utest + +#### ServerState triggers validation of workload fields +`swdd~server-state-triggers-validation-of-workload-fields~1` + +Status: approved + +When the ServerState receives successfully rendered workloads from the ConfigRenderer, the ServerState shall trigger the workload to validate the format of its internal fields. + +Rationale: +Some workload fields only contain the final content after rendering. 
+ +Tags: +- ServerState + +Needs: +- impl +- utest + +#### ConfigRenderer renders workload configuration +`swdd~config-renderer-renders-workload-configuration~1` + +Status: approved + +When the ConfigRenderer is requested to render the workloads with configuration items, for each provided workload that references config items inside its `configs` field, the ConfigRenderer shall: +* create a data structure containing memory references to the config items of the CompleteState referenced inside its `configs` field +* render the workload's `agent` and `runtimeConfig` fields by replacing each template string with the referenced configuration item content +* create a new workload configuration containing the rendered fields and the new instance name + +Comment: +In case of a render error, the workload configuration remains unrendered and an error is thrown. If a workload does not reference a configuration item, the rendering of that workload is skipped and its fields remain unrendered. + +Tags: +- ConfigRenderer + +Needs: +- impl +- utest + #### ServerState rejects state with cycle `swdd~server-state-rejects-state-with-cyclic-dependencies~1` diff --git a/server/doc/swdesign/plantuml/seq_update_state.plantuml b/server/doc/swdesign/plantuml/seq_update_state.plantuml index 01976212a..6d8f95211 100644 --- a/server/doc/swdesign/plantuml/seq_update_state.plantuml +++ b/server/doc/swdesign/plantuml/seq_update_state.plantuml @@ -16,11 +16,12 @@ end box activate agent1 ... -agent1 ->> tonic_server--++: UpdateState -tonic_server ->> server--++: UpdateState +agent1 ->> tonic_server--++: UpdateStateRequest +tonic_server ->> server--++: UpdateStateRequest server -> server_state++: update state -server_state -> server_state ++--: Compare new with\nold DesiredState -server_state -> server_state ++--: Validate the new state +server_state -> server_state ++--: Generate new State based on\nupdate mask and\nnew and old DesiredState +server_state -> server_state ++--: Render the\ntemplated DesiredState +server_state -> server_state ++--: Validate the new State server_state -> server_state ++--: Update DeleteGraph with\nDeleteConditions\nof new workloads alt State valid server_state -->> server--: Added and deleted workloads diff --git a/server/doc/swdesign/plantuml/seq_update_state.svg b/server/doc/swdesign/plantuml/seq_update_state.svg index 02921ee74..96f92cf19 100644 --- a/server/doc/swdesign/plantuml/seq_update_state.svg +++ b/server/doc/swdesign/plantuml/seq_update_state.svg @@ -1 +1 @@ -Ankaios ServerAnkaios Agents«thread»AnkaiosServerServerState«thread»GRPCCommunicationsServer«thread»tonic gRPC serverAgent 1Agent 2UpdateStateUpdateStateupdate stateCompare new withold DesiredStateValidate the new stateUpdate DeleteGraph withDeleteConditionsof new workloadsalt[State valid]Added and deleted workloadsCreate FromServer message fromadded and deleted workloadssend FromServer messageopt[ExuectionsRequests for Agent 1]send FromServer messageapply FromServer messageopt[ExuectionsRequests for Agent 2]send FromServer messageapply FromServer messageUpdateStateErrorlisten for incoming requests \ No newline at end of file +Ankaios ServerAnkaios Agents«thread»AnkaiosServerServerState«thread»GRPCCommunicationsServer«thread»tonic gRPC serverAgent 1Agent 2UpdateStateRequestUpdateStateRequestupdate stateGenerate new State based onupdate mask andnew and old DesiredStateRender thetemplated DesiredStateValidate the new StateUpdate DeleteGraph withDeleteConditionsof new workloadsalt[State valid]Added and deleted workloadsCreate 
FromServer message fromadded and deleted workloadssend FromServer messageopt[ExuectionsRequests for Agent 1]send FromServer messageapply FromServer messageopt[ExuectionsRequests for Agent 2]send FromServer messageapply FromServer messageUpdateStateErrorlisten for incoming requests \ No newline at end of file diff --git a/server/src/ankaios_server.rs b/server/src/ankaios_server.rs index 535e26ef5..0fcd63d22 100644 --- a/server/src/ankaios_server.rs +++ b/server/src/ankaios_server.rs @@ -12,6 +12,7 @@ // // SPDX-License-Identifier: Apache-2.0 +mod config_renderer; mod cycle_check; mod delete_graph; mod server_state; @@ -67,7 +68,7 @@ impl AnkaiosServer { pub async fn start(&mut self, startup_state: Option) -> Result<(), String> { if let Some(state) = startup_state { - State::verify_format(&state.desired_state)?; + State::verify_api_version(&state.desired_state)?; match self.server_state.update(state, vec![]) { Ok(Some((added_workloads, deleted_workloads))) => { @@ -230,8 +231,9 @@ impl AnkaiosServer { // [impl->swdd~update-desired-state-with-invalid-version~1] // [impl->swdd~update-desired-state-with-missing-version~1] // [impl->swdd~server-naming-convention~1] - if let Err(error_message) = - State::verify_format(&update_state_request.state.desired_state) + let updated_desired_state = &update_state_request.state.desired_state; + if let Err(error_message) = State::verify_api_version(updated_desired_state) + .and_then(|_| State::verify_configs_format(updated_desired_state)) { log::warn!("The CompleteState in the request has wrong format. {} -> ignoring the request", error_message); @@ -418,8 +420,8 @@ mod tests { use common::objects::{ generate_test_stored_workload_spec, generate_test_workload_spec_with_param, generate_test_workload_states_map_with_data, CompleteState, CpuUsage, DeletedWorkload, - ExecutionState, ExecutionStateEnum, FreeMemory, PendingSubstate, State, WorkloadInstanceName, - WorkloadState, + ExecutionState, ExecutionStateEnum, FreeMemory, PendingSubstate, State, + WorkloadInstanceName, WorkloadState, }; use common::test_utils::generate_test_proto_workload_with_param; use common::to_server_interface::ToServerInterface; @@ -442,7 +444,7 @@ mod tests { let (to_agents, mut comm_middle_ware_receiver) = create_from_server_channel(common::CHANNEL_CAPACITY); - // contains a self cycle to workload A + // contains a self cycle to workload_A let workload = generate_test_stored_workload_spec(AGENT_A, RUNTIME_NAME); let startup_state = CompleteState { @@ -548,7 +550,7 @@ mod tests { .once() .in_sequence(&mut seq) .return_const(Err(UpdateStateError::CycleInDependencies( - "workload A".to_string(), + "workload_A".to_string(), ))); let added_workloads = vec![updated_workload.clone()]; @@ -1722,8 +1724,7 @@ mod tests { let mut server = AnkaiosServer::new(server_receiver, to_agents); let mut mock_server_state = MockServerState::new(); - mock_server_state - + mock_server_state .expect_contains_connected_agent() .once() .return_const(false); diff --git a/server/src/ankaios_server/config_renderer.rs b/server/src/ankaios_server/config_renderer.rs new file mode 100644 index 000000000..f8db12002 --- /dev/null +++ b/server/src/ankaios_server/config_renderer.rs @@ -0,0 +1,365 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. 
+// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +use std::{collections::HashMap, fmt}; + +use common::objects::{ConfigItem, StoredWorkloadSpec, WorkloadInstanceName, WorkloadSpec}; +use handlebars::Handlebars; + +pub type RenderedWorkloads = HashMap<String, WorkloadSpec>; + +#[cfg(test)] +use mockall::mock; + +#[derive(Debug, PartialEq, Eq)] +pub enum ConfigRenderError { + Field(String, String), + NotExistingConfigKey(String), +} + +impl fmt::Display for ConfigRenderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConfigRenderError::Field(field, reason) => { + write!(f, "Failed to render field '{}': '{}'", field, reason) + } + ConfigRenderError::NotExistingConfigKey(config_key) => { + write!( + f, + "Workload references config key '{}' that does not exist", + config_key + ) + } + } + } +} + +// [impl->swdd~server-delegate-template-render-to-external-library~1] +pub struct ConfigRenderer { + template_engine: Handlebars<'static>, +} + +impl Default for ConfigRenderer { + fn default() -> Self { + let mut template_engine = Handlebars::new(); + template_engine.set_strict_mode(true); // enable throwing render errors if referenced context data is missing + Self { template_engine } + } +} + +impl ConfigRenderer { + // [impl->swdd~config-renderer-renders-workload-configuration~1] + pub fn render_workloads( + &self, + workloads: &HashMap<String, StoredWorkloadSpec>, + configs: &HashMap<String, ConfigItem>, + ) -> Result<RenderedWorkloads, ConfigRenderError> { + let mut rendered_workloads = HashMap::new(); + for (workload_name, stored_workload) in workloads { + let workload_spec = if stored_workload.configs.is_empty() { + log::debug!( + "Skipping rendering of workload '{}' as no config is assigned to the workload", + workload_name + ); + WorkloadSpec::from((workload_name.to_owned(), stored_workload.clone())) + } else { + let wl_config_map = + self.create_config_map_for_workload(stored_workload, configs)?; + log::debug!( + "Rendering workload '{}' with config '{:?}'", + workload_name, + wl_config_map + ); + self.render_workload_fields(workload_name, stored_workload, &wl_config_map)?
+ }; + + rendered_workloads.insert(workload_name.clone(), workload_spec); + } + log::debug!("Rendered CompleteState: {:?}", rendered_workloads); + Ok(rendered_workloads) + } + + // [impl->swdd~config-renderer-renders-workload-configuration~1] + fn create_config_map_for_workload<'a>( + &self, + workload_spec: &'a StoredWorkloadSpec, + configs: &'a HashMap<String, ConfigItem>, + ) -> Result<HashMap<&'a String, &'a ConfigItem>, ConfigRenderError> { + let mut wl_config_map = HashMap::new(); + for (config_alias, config_key) in &workload_spec.configs { + if let Some(config_value) = configs.get(config_key) { + wl_config_map.insert(config_alias, config_value); + } else { + return Err(ConfigRenderError::NotExistingConfigKey(config_key.clone())); + } + } + Ok(wl_config_map) + } + + // [impl->swdd~config-renderer-renders-workload-configuration~1] + fn render_workload_fields( + &self, + workload_name: &str, + workload: &StoredWorkloadSpec, + wl_config_map: &HashMap<&String, &ConfigItem>, + ) -> Result<WorkloadSpec, ConfigRenderError> { + let rendered_runtime_config = self + .template_engine + .render_template(&workload.runtime_config, &wl_config_map) + .map_err(|err| ConfigRenderError::Field("runtimeConfig".to_owned(), err.to_string()))?; + + let rendered_agent_name = self + .template_engine + .render_template(&workload.agent, &wl_config_map) + .map_err(|err| ConfigRenderError::Field("agent".to_owned(), err.to_string()))?; + + Ok(WorkloadSpec { + instance_name: WorkloadInstanceName::builder() + .workload_name(workload_name) + .agent_name(rendered_agent_name) + .config(&rendered_runtime_config) + .build(), + runtime: workload.runtime.clone(), + runtime_config: rendered_runtime_config, + tags: workload.tags.clone(), + dependencies: workload.dependencies.clone(), + restart_policy: workload.restart_policy.clone(), + control_interface_access: workload.control_interface_access.clone(), + }) + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mock!
{ + pub ConfigRenderer { + pub fn render_workloads( + &self, + workloads: &HashMap, + configs: &HashMap, + ) -> Result; + } +} + +#[cfg(test)] +mod tests { + use super::{ConfigRenderError, ConfigRenderer, RenderedWorkloads}; + use std::collections::HashMap; + + use common::objects::{ + generate_test_configs, generate_test_stored_workload_spec_with_config, + generate_test_workload_spec_with_runtime_config, + }; + + const WORKLOAD_NAME_1: &str = "workload_1"; + const AGENT_A: &str = "agent_A"; + const RUNTIME: &str = "runtime"; + + // [utest->swdd~config-renderer-renders-workload-configuration~1] + #[test] + fn utest_render_workloads_render_required_fields_successfully() { + let templated_runtime_config = + "some_value_1: {{ref1.values.value_1}}\nsome_value_2: {{ref1.values.value_2.0}}"; + let templated_agent_name = "{{ref1.agent_name}}"; + let stored_workload = generate_test_stored_workload_spec_with_config( + templated_agent_name, + RUNTIME, + templated_runtime_config, + ); + + let workloads = HashMap::from([(WORKLOAD_NAME_1.to_owned(), stored_workload)]); + let configs = generate_test_configs(); + let renderer = ConfigRenderer::default(); + + let expected_workload_spec = generate_test_workload_spec_with_runtime_config( + AGENT_A.to_owned(), + WORKLOAD_NAME_1.to_owned(), + RUNTIME.to_owned(), + "some_value_1: value123\nsome_value_2: list_value_1".to_owned(), + ); + + let result = renderer.render_workloads(&workloads, &configs); + + assert_eq!( + Ok(RenderedWorkloads::from([( + WORKLOAD_NAME_1.to_owned(), + expected_workload_spec + )])), + result + ); + } + + // [utest->swdd~config-renderer-renders-workload-configuration~1] + #[test] + fn utest_render_workloads_fails_field_uses_config_key_instead_of_alias() { + let templated_runtime_config = "config_1: {{config_1.values.value_1}}"; + let stored_workload = generate_test_stored_workload_spec_with_config( + AGENT_A, + RUNTIME, + templated_runtime_config, + ); + + let workloads = HashMap::from([(WORKLOAD_NAME_1.to_owned(), stored_workload)]); + let configs = generate_test_configs(); + let renderer = ConfigRenderer::default(); + + assert!(renderer.render_workloads(&workloads, &configs).is_err()); + } + + // [utest->swdd~config-renderer-renders-workload-configuration~1] + #[test] + fn utest_render_workloads_not_rendering_workloads_with_no_referenced_configs() { + let templated_runtime_config = "config_1: {{config_1.values.value_1}}"; + let templated_agent_name = "{{config_1.agent_name}}"; + let mut stored_workload = generate_test_stored_workload_spec_with_config( + templated_agent_name, + RUNTIME, + templated_runtime_config, + ); + + stored_workload.configs.clear(); // no configs assigned + + let workloads = HashMap::from([(WORKLOAD_NAME_1.to_owned(), stored_workload)]); + let configs = generate_test_configs(); + let renderer = ConfigRenderer::default(); + + let expected_workload_spec = generate_test_workload_spec_with_runtime_config( + templated_agent_name.to_owned(), + WORKLOAD_NAME_1.to_owned(), + RUNTIME.to_owned(), + templated_runtime_config.to_owned(), + ); + + let result = renderer.render_workloads(&workloads, &configs); + + assert_eq!( + Ok(RenderedWorkloads::from([( + WORKLOAD_NAME_1.to_owned(), + expected_workload_spec + )])), + result + ); + } + + // [utest->swdd~config-renderer-renders-workload-configuration~1] + #[test] + fn utest_render_workloads_fails_workload_references_not_existing_config_key() { + let templated_runtime_config = "config_1: {{ref1.values.value_1}}"; + let mut stored_workload = 
generate_test_stored_workload_spec_with_config( + AGENT_A, + RUNTIME, + templated_runtime_config, + ); + + stored_workload.configs = + HashMap::from([("ref1".to_owned(), "not_existing_config_key".to_owned())]); + + let workloads = HashMap::from([(WORKLOAD_NAME_1.to_owned(), stored_workload)]); + let configs = generate_test_configs(); + let renderer = ConfigRenderer::default(); + let result = renderer.render_workloads(&workloads, &configs); + assert!(result.is_err()); + assert!( + matches!(result.unwrap_err(), ConfigRenderError::NotExistingConfigKey(config_key) if config_key == "not_existing_config_key") + ); + } + + // [utest->swdd~config-renderer-renders-workload-configuration~1] + #[test] + fn utest_render_workloads_fails_workload_references_unused_not_existing_config_key() { + let mut stored_workload = + generate_test_stored_workload_spec_with_config(AGENT_A, RUNTIME, "some runtime config"); + + stored_workload.configs = HashMap::from([( + "ref1".to_owned(), + "not_existing_unused_config_key".to_owned(), + )]); + + let workloads = HashMap::from([(WORKLOAD_NAME_1.to_owned(), stored_workload)]); + let configs = generate_test_configs(); + let renderer = ConfigRenderer::default(); + let result = renderer.render_workloads(&workloads, &configs); + assert!(result.is_err()); + assert!( + matches!(result.unwrap_err(), ConfigRenderError::NotExistingConfigKey(config_key) if config_key == "not_existing_unused_config_key") + ); + } + + // [utest->swdd~config-renderer-renders-workload-configuration~1] + #[test] + fn utest_render_workloads_fails_runtime_config_contains_non_existing_config() { + let templated_runtime_config = "config_1: {{config_1.values.not_existing_key}}"; + let stored_workload = generate_test_stored_workload_spec_with_config( + AGENT_A, + RUNTIME, + templated_runtime_config, + ); + + let workloads = HashMap::from([(WORKLOAD_NAME_1.to_owned(), stored_workload)]); + let configs = generate_test_configs(); + let renderer = ConfigRenderer::default(); + + let result = renderer.render_workloads(&workloads, &configs); + + assert!(result.is_err()); + assert!( + matches!(result.unwrap_err(), ConfigRenderError::Field(field, _) if field == "runtimeConfig") + ); + } + + // [utest->swdd~config-renderer-renders-workload-configuration~1] + #[test] + fn utest_render_workloads_fails_agent_contains_non_existing_config() { + let stored_workload = generate_test_stored_workload_spec_with_config( + "{{config_1.not_existing_key}}", + RUNTIME, + "some runtime config", + ); + + let workloads = HashMap::from([(WORKLOAD_NAME_1.to_owned(), stored_workload)]); + let configs = generate_test_configs(); + let renderer = ConfigRenderer::default(); + + let result = renderer.render_workloads(&workloads, &configs); + + assert!(result.is_err()); + assert!( + matches!(result.unwrap_err(), ConfigRenderError::Field(field, _) if field == "agent") + ); + } + + // [utest->swdd~config-renderer-renders-workload-configuration~1] + #[test] + fn utest_render_workloads_fails_workload_references_empty_configs() { + let templated_runtime_config = "config_1: {{config_1.values.value_1}}"; + let stored_workload = generate_test_stored_workload_spec_with_config( + AGENT_A, + RUNTIME, + templated_runtime_config, + ); + + let workloads = HashMap::from([(WORKLOAD_NAME_1.to_owned(), stored_workload)]); + let configs = HashMap::default(); + let renderer = ConfigRenderer::default(); + + assert!(renderer.render_workloads(&workloads, &configs).is_err()); + } +} diff --git a/server/src/ankaios_server/server_state.rs 
b/server/src/ankaios_server/server_state.rs index e2c5b3ac2..6f94b6d15 100644 --- a/server/src/ankaios_server/server_state.rs +++ b/server/src/ankaios_server/server_state.rs @@ -12,19 +12,23 @@ // // SPDX-License-Identifier: Apache-2.0 +use super::config_renderer::RenderedWorkloads; use api::ank_base; use common::commands; +#[cfg_attr(test, mockall_double::double)] +use super::config_renderer::ConfigRenderer; + use super::cycle_check; #[cfg_attr(test, mockall_double::double)] use super::delete_graph::DeleteGraph; use common::objects::{ - AgentAttributes, CpuUsage, FreeMemory, WorkloadInstanceName, WorkloadState, WorkloadStatesMap, + AgentAttributes, CpuUsage, FreeMemory, State, WorkloadState, WorkloadStatesMap, }; use common::std_extensions::IllegalStateResult; use common::{ commands::CompleteStateRequest, - objects::{CompleteState, DeletedWorkload, State, WorkloadSpec}, + objects::{CompleteState, DeletedWorkload, WorkloadSpec}, state_manipulation::{Object, Path}, }; use std::fmt::Display; @@ -32,67 +36,29 @@ use std::fmt::Display; #[cfg(test)] use mockall::automock; -fn update_state( - desired_state: &CompleteState, - updated_state: CompleteState, - update_mask: Vec, -) -> Result { - // [impl->swdd~update-desired-state-empty-update-mask~1] - if update_mask.is_empty() { - return Ok(updated_state); - } - - // [impl->swdd~update-desired-state-with-update-mask~1] - let mut new_state: Object = desired_state.try_into().map_err(|err| { - UpdateStateError::ResultInvalid(format!("Failed to parse current state, '{}'", err)) - })?; - let state_from_update: Object = updated_state.try_into().map_err(|err| { - UpdateStateError::ResultInvalid(format!("Failed to parse new state, '{}'", err)) - })?; - - for field in update_mask { - let field: Path = field.into(); - if let Some(field_from_update) = state_from_update.get(&field) { - if new_state.set(&field, field_from_update.to_owned()).is_err() { - return Err(UpdateStateError::FieldNotFound(field.into())); - } - } else if new_state.remove(&field).is_err() { - return Err(UpdateStateError::FieldNotFound(field.into())); - } - } - - if let Ok(new_state) = new_state.try_into() { - Ok(new_state) - } else { - Err(UpdateStateError::ResultInvalid( - "Could not parse into CompleteState.".to_string(), - )) - } -} - fn extract_added_and_deleted_workloads( - desired_state: &State, - new_state: &State, + current_workloads: &RenderedWorkloads, + new_workloads: &RenderedWorkloads, ) -> Option<(Vec, Vec)> { let mut added_workloads: Vec = Vec::new(); let mut deleted_workloads: Vec = Vec::new(); // find updated or deleted workloads - desired_state.workloads.iter().for_each(|(wl_name, wls)| { - if let Some(new_wls) = new_state.workloads.get(wl_name) { + current_workloads.iter().for_each(|(wl_name, wls)| { + if let Some(new_wls) = new_workloads.get(wl_name) { // The new workload is identical with existing or updated. Lets check if it is an update. 
if wls != new_wls { // [impl->swdd~server-detects-changed-workload~1] - added_workloads.push(WorkloadSpec::from((wl_name.to_owned(), new_wls.clone()))); + added_workloads.push(new_wls.clone()); deleted_workloads.push(DeletedWorkload { - instance_name: WorkloadInstanceName::from((wl_name.to_owned(), wls)), + instance_name: wls.instance_name.clone(), ..Default::default() }); } } else { // [impl->swdd~server-detects-deleted-workload~1] deleted_workloads.push(DeletedWorkload { - instance_name: WorkloadInstanceName::from((wl_name.to_owned(), wls)), + instance_name: wls.instance_name.clone(), ..Default::default() }); } @@ -100,17 +66,11 @@ fn extract_added_and_deleted_workloads( // find new workloads // [impl->swdd~server-detects-new-workload~1] - new_state - .workloads - .iter() - .for_each(|(new_wl_name, new_wls)| { - if !desired_state.workloads.contains_key(new_wl_name) { - added_workloads.push(WorkloadSpec::from(( - new_wl_name.to_owned(), - new_wls.clone(), - ))); - } - }); + new_workloads.iter().for_each(|(new_wl_name, new_wls)| { + if !current_workloads.contains_key(new_wl_name) { + added_workloads.push(new_wls.clone()); + } + }); if added_workloads.is_empty() && deleted_workloads.is_empty() { return None; @@ -149,7 +109,9 @@ impl Display for UpdateStateError { #[derive(Default)] pub struct ServerState { state: CompleteState, + rendered_workloads: RenderedWorkloads, delete_graph: DeleteGraph, + config_renderer: ConfigRenderer, } pub type AddedDeletedWorkloads = Option<(Vec, Vec)>; @@ -185,6 +147,8 @@ impl ServerState { let current_complete_state: Object = current_complete_state.try_into().unwrap_or_illegal_state(); let mut return_state = Object::default(); + + log::debug!("Current state: {:?}", current_complete_state); for field in &filters { if let Some(value) = current_complete_state.get(&field.into()) { return_state.set(&field.into(), value.to_owned())?; @@ -209,14 +173,10 @@ impl ServerState { // [impl->swdd~agent-from-agent-field~1] pub fn get_workloads_for_agent(&self, agent_name: &str) -> Vec { - self.state - .desired_state - .workloads + self.rendered_workloads .iter() - .filter(|(_, workload)| workload.agent.eq(agent_name)) - .map(|(workload_name, workload)| { - WorkloadSpec::from((workload_name.clone(), workload.clone())) - }) + .filter(|(_, workload)| workload.instance_name.agent_name().eq(agent_name)) + .map(|(_, workload)| workload.clone()) .collect() } @@ -227,11 +187,24 @@ impl ServerState { ) -> Result { // [impl->swdd~update-desired-state-with-update-mask~1] // [impl->swdd~update-desired-state-empty-update-mask~1] - match update_state(&self.state, new_state, update_mask) { - Ok(new_state) => { + match self.generate_new_state(new_state, update_mask) { + Ok(new_templated_state) => { + // [impl->swdd~server-state-triggers-configuration-rendering-of-workloads~1] + let new_rendered_workloads = self + .config_renderer + .render_workloads( + &new_templated_state.desired_state.workloads, + &new_templated_state.desired_state.configs, + ) + .map_err(|err| UpdateStateError::ResultInvalid(err.to_string()))?; + + // [impl->swdd~server-state-triggers-validation-of-workload-fields~1] + self.verify_workload_fields_format(&new_rendered_workloads)?; + + // [impl->swdd~server-state-compares-rendered-workloads~1] let cmd = extract_added_and_deleted_workloads( - &self.state.desired_state, - &new_state.desired_state, + &self.rendered_workloads, + &new_rendered_workloads, ); if let Some((added_workloads, mut deleted_workloads)) = cmd { @@ -248,7 +221,7 @@ impl ServerState { // 
[impl->swdd~server-state-rejects-state-with-cyclic-dependencies~1] if let Some(workload_part_of_cycle) = - cycle_check::dfs(&new_state.desired_state, Some(start_nodes)) + cycle_check::dfs(&new_templated_state.desired_state, Some(start_nodes)) { return Err(UpdateStateError::CycleInDependencies( workload_part_of_cycle, @@ -262,10 +235,13 @@ impl ServerState { self.delete_graph .apply_delete_conditions_to(&mut deleted_workloads); - self.state = new_state; + self.set_desired_state(new_templated_state.desired_state); + self.rendered_workloads = new_rendered_workloads; Ok(Some((added_workloads, deleted_workloads))) } else { - self.state = new_state; + // update state with changed fields not affecting workloads, e.g. config items + // [impl->swdd~server-state-updates-state-on-unmodified-workloads~1] + self.set_desired_state(new_templated_state.desired_state); Ok(None) } } @@ -310,6 +286,59 @@ impl ServerState { self.delete_graph .remove_deleted_workloads_from_delete_graph(new_workload_states); } + + fn generate_new_state( + &mut self, + updated_state: CompleteState, + update_mask: Vec, + ) -> Result { + // [impl->swdd~update-desired-state-empty-update-mask~1] + if update_mask.is_empty() { + return Ok(updated_state); + } + + // [impl->swdd~update-desired-state-with-update-mask~1] + let mut new_state: Object = (&self.state).try_into().map_err(|err| { + UpdateStateError::ResultInvalid(format!("Failed to parse current state, '{}'", err)) + })?; + let state_from_update: Object = updated_state.try_into().map_err(|err| { + UpdateStateError::ResultInvalid(format!("Failed to parse new state, '{}'", err)) + })?; + + for field in update_mask { + let field: Path = field.into(); + if let Some(field_from_update) = state_from_update.get(&field) { + if new_state.set(&field, field_from_update.to_owned()).is_err() { + return Err(UpdateStateError::FieldNotFound(field.into())); + } + } else if new_state.remove(&field).is_err() { + return Err(UpdateStateError::FieldNotFound(field.into())); + } + } + + new_state.try_into().map_err(|err| { + UpdateStateError::ResultInvalid(format!( + "Could not parse into CompleteState: '{}'", + err + )) + }) + } + + fn set_desired_state(&mut self, new_desired_state: State) { + self.state.desired_state = new_desired_state; + } + + // [impl->swdd~server-state-triggers-validation-of-workload-fields~1] + fn verify_workload_fields_format( + &self, + workloads: &RenderedWorkloads, + ) -> Result<(), UpdateStateError> { + for workload_spec in workloads.values() { + WorkloadSpec::verify_fields_format(workload_spec) + .map_err(UpdateStateError::ResultInvalid)?; + } + Ok(()) + } } ////////////////////////////////////////////////////////////////////////////// @@ -327,15 +356,20 @@ mod tests { use common::{ commands::{AgentLoadStatus, CompleteStateRequest}, objects::{ - generate_test_agent_map, generate_test_stored_workload_spec, + generate_test_agent_map, generate_test_configs, generate_test_stored_workload_spec, generate_test_workload_spec_with_control_interface_access, - generate_test_workload_spec_with_param, AgentMap, CompleteState, CpuUsage, + generate_test_workload_spec_with_param, AgentMap, CompleteState, ConfigItem, CpuUsage, DeletedWorkload, FreeMemory, State, WorkloadSpec, WorkloadStatesMap, }, test_utils::{self, generate_test_complete_state}, }; + use mockall::predicate; - use crate::ankaios_server::{delete_graph::MockDeleteGraph, server_state::UpdateStateError}; + use crate::ankaios_server::{ + config_renderer::{ConfigRenderError, MockConfigRenderer, RenderedWorkloads}, + 
delete_graph::MockDeleteGraph, + server_state::UpdateStateError, + }; use super::ServerState; const AGENT_A: &str = "agent_A"; @@ -346,6 +380,19 @@ mod tests { const WORKLOAD_NAME_4: &str = "workload_4"; const RUNTIME: &str = "runtime"; + fn generate_rendered_workloads_from_state(state: &State) -> RenderedWorkloads { + state + .workloads + .iter() + .map(|(name, spec)| { + ( + name.to_owned(), + WorkloadSpec::from((name.to_owned(), spec.to_owned())), + ) + }) + .collect() + } + // [utest->swdd~server-provides-interface-get-complete-state~2] // [utest->swdd~server-filters-get-complete-state-result~2] #[test] @@ -534,8 +581,14 @@ mod tests { RUNTIME.to_string(), ); + let old_complete_state = + generate_test_complete_state(vec![w1.clone(), w2.clone(), w3.clone()]); + let server_state = ServerState { - state: generate_test_complete_state(vec![w1.clone(), w2.clone(), w3.clone()]), + rendered_workloads: generate_rendered_workloads_from_state( + &old_complete_state.desired_state, + ), + state: old_complete_state, ..Default::default() }; @@ -561,7 +614,7 @@ mod tests { let workload = generate_test_stored_workload_spec(AGENT_A.to_string(), RUNTIME.to_string()); - // workload has a self cycle to workload A + // workload has a self cycle to workload_A let new_workload_1 = generate_test_stored_workload_spec(AGENT_A.to_string(), RUNTIME.to_string()); @@ -580,7 +633,7 @@ mod tests { let rejected_new_state = CompleteState { desired_state: State { workloads: HashMap::from([ - ("workload A".to_string(), new_workload_1), + ("workload_A".to_string(), new_workload_1), (WORKLOAD_NAME_1.to_string(), new_workload_2), ]), ..Default::default() @@ -594,16 +647,29 @@ mod tests { .expect_apply_delete_conditions_to() .never(); + let mut mock_config_renderer = MockConfigRenderer::new(); + let cloned_rejected_state = rejected_new_state.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| { + Ok(generate_rendered_workloads_from_state( + &cloned_rejected_state, + )) + }); + let mut server_state = ServerState { state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, }; - let result = server_state.update(rejected_new_state.clone(), vec![]); + let result = server_state.update(rejected_new_state, vec![]); assert_eq!( result, Err(UpdateStateError::CycleInDependencies( - "workload A".to_string() + "workload_A".to_string() )) ); @@ -612,6 +678,7 @@ mod tests { } // [utest->swdd~update-desired-state-empty-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] #[test] fn utest_server_state_update_state_replace_all_if_update_mask_empty() { let _ = env_logger::builder().is_test(true).try_init(); @@ -628,19 +695,31 @@ mod tests { .once() .return_const(()); + let mut mock_config_renderer = MockConfigRenderer::new(); + let clone_updated_state = update_state.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| { + Ok(generate_rendered_workloads_from_state(&clone_updated_state)) + }); + let mut server_state = ServerState { state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, }; server_state .update(update_state.clone(), update_mask) .unwrap(); - assert_eq!(update_state, server_state.state); + 
assert_eq!(update_state.desired_state, server_state.state.desired_state); } // [utest->swdd~update-desired-state-with-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] #[test] fn utest_server_state_update_state_replace_workload() { let _ = env_logger::builder().is_test(true).try_init(); @@ -669,9 +748,22 @@ mod tests { .once() .return_const(()); + let mut mock_config_renderer = MockConfigRenderer::new(); + let cloned_expected_state = expected.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| { + Ok(generate_rendered_workloads_from_state( + &cloned_expected_state, + )) + }); + let mut server_state = ServerState { state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, }; server_state.update(update_state, update_mask).unwrap(); @@ -679,6 +771,8 @@ mod tests { } // [utest->swdd~update-desired-state-with-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] + // [utest->swdd~server-state-triggers-validation-of-workload-fields~1] #[test] fn utest_server_state_update_state_add_workload() { let old_state = generate_test_old_state(); @@ -706,9 +800,26 @@ mod tests { .once() .return_const(()); + let mut mock_config_renderer = MockConfigRenderer::new(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| { + Ok(RenderedWorkloads::from([( + WORKLOAD_NAME_4.to_owned(), + generate_test_workload_spec_with_param( + new_workload.agent.clone(), + WORKLOAD_NAME_4.to_owned(), + new_workload.runtime.clone(), + ), + )])) + }); + let mut server_state = ServerState { state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, }; server_state.update(update_state, update_mask).unwrap(); @@ -716,6 +827,250 @@ mod tests { } // [utest->swdd~update-desired-state-with-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] + // [utest->swdd~server-state-updates-state-on-unmodified-workloads~1] + #[test] + fn utest_server_state_update_state_update_configs_not_affecting_workloads() { + let old_state = generate_test_old_state(); + let mut state_with_updated_config = old_state.clone(); + state_with_updated_config.desired_state.configs = generate_test_configs(); + + let update_mask = vec!["desiredState".to_string()]; + + let mut delete_graph_mock = MockDeleteGraph::new(); + delete_graph_mock.expect_insert().never(); + + delete_graph_mock + .expect_apply_delete_conditions_to() + .never(); + + let mut mock_config_renderer = MockConfigRenderer::new(); + let cloned_state_with_updated_config = state_with_updated_config.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .with( + predicate::eq(state_with_updated_config.desired_state.workloads.clone()), + predicate::eq(state_with_updated_config.desired_state.configs.clone()), + ) + .returning(move |_, _| { + Ok(generate_rendered_workloads_from_state( + &cloned_state_with_updated_config, + )) + }); + + let mut server_state = ServerState { + state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), + delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, + }; + + let expected = 
state_with_updated_config.clone(); + + let added_deleted_workloads = server_state + .update(state_with_updated_config, update_mask) + .unwrap(); + + assert!(added_deleted_workloads.is_none()); + assert_eq!(expected, server_state.state); + } + + // [utest->swdd~update-desired-state-with-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] + #[test] + fn utest_server_state_update_state_update_workload_with_existing_configs() { + let mut old_state = generate_test_old_state(); + old_state.desired_state.configs = generate_test_configs(); + + let mut updated_state = old_state.clone(); + updated_state.desired_state.configs = HashMap::from([( + "config_1".to_string(), + ConfigItem::ConfigObject(HashMap::from([( + "agent_name".to_string(), + ConfigItem::String(AGENT_B.to_owned()), // changed agent name in configs + )])), + )]); + + let updated_workload = updated_state + .desired_state + .workloads + .get_mut(WORKLOAD_NAME_1) + .unwrap(); + + updated_workload.runtime_config = "updated runtime config".to_string(); // changed runtime config + + // update mask references only changed workload + let update_mask = vec![format!("desiredState.workloads.{WORKLOAD_NAME_1}")]; + + let mut delete_graph_mock = MockDeleteGraph::new(); + delete_graph_mock.expect_insert().once().return_const(()); + + delete_graph_mock + .expect_apply_delete_conditions_to() + .once() + .return_const(()); + + let mut mock_config_renderer = MockConfigRenderer::new(); + let state_to_render = updated_state.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .with( + predicate::eq(updated_state.desired_state.workloads.clone()), + predicate::eq(old_state.desired_state.configs.clone()), // existing configs due to update mask + ) + .returning(move |_, _| Ok(generate_rendered_workloads_from_state(&state_to_render))); + + let mut server_state = ServerState { + state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), + delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, + }; + + let mut expected = updated_state.clone(); + expected.desired_state.configs = old_state.desired_state.configs.clone(); // existing configs due to update mask + + let result = server_state.update(updated_state, update_mask); + assert!(result.is_ok()); + + let (added_workloads, _) = result.unwrap().unwrap_or_default(); + + let new_workload = added_workloads + .iter() + .find(|w| w.instance_name.workload_name() == WORKLOAD_NAME_1); + + assert!(new_workload.is_some()); + assert_eq!(new_workload.unwrap().instance_name.agent_name(), AGENT_A); // assume not updated due to update mask + + assert_eq!(expected, server_state.state); + } + + // [utest->swdd~update-desired-state-with-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] + // [utest->swdd~server-state-compares-rendered-workloads~1] + #[test] + fn utest_server_state_update_state_update_workload_on_changed_configs() { + let mut old_state = generate_test_old_state(); + old_state.desired_state.configs = generate_test_configs(); + + let mut updated_state = old_state.clone(); + updated_state.desired_state.configs = HashMap::from([( + "config_1".to_string(), + ConfigItem::ConfigObject(HashMap::from([( + "agent_name".to_string(), + ConfigItem::String(AGENT_B.to_owned()), // changed agent name in configs + )])), + )]); + + let update_mask = vec!["desiredState.configs".to_string()]; + + let mut delete_graph_mock = 
MockDeleteGraph::new(); + delete_graph_mock.expect_insert().once().return_const(()); + + delete_graph_mock + .expect_apply_delete_conditions_to() + .once() + .return_const(()); + + let mut state_to_render = updated_state.desired_state.clone(); + let new_rendered_workload = state_to_render.workloads.get_mut(WORKLOAD_NAME_1).unwrap(); + new_rendered_workload.agent = AGENT_B.to_owned(); // updated agent name + + let mut mock_config_renderer = MockConfigRenderer::new(); + mock_config_renderer + .expect_render_workloads() + .once() + .with( + predicate::eq(updated_state.desired_state.workloads.clone()), + predicate::eq(updated_state.desired_state.configs.clone()), + ) + .returning(move |_, _| Ok(generate_rendered_workloads_from_state(&state_to_render))); + + let mut server_state = ServerState { + state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), + delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, + }; + + let expected = updated_state.clone(); + + let result = server_state.update(updated_state, update_mask); + assert!(result.is_ok()); + + let (added_workloads, deleted_workloads) = result.unwrap().unwrap_or_default(); + + let new_workload = added_workloads + .iter() + .find(|w| w.instance_name.workload_name() == WORKLOAD_NAME_1); + + assert!(new_workload.is_some()); + assert_eq!(new_workload.unwrap().instance_name.agent_name(), AGENT_B); // updated with new agent name + + let deleted_workload = deleted_workloads + .iter() + .find(|w| w.instance_name.workload_name() == WORKLOAD_NAME_1); + assert!(deleted_workload.is_some()); + assert_eq!( + deleted_workload.unwrap().instance_name.agent_name(), + AGENT_A + ); // deleted with old agent name + + assert_eq!(expected, server_state.state); + } + + // [utest->swdd~update-desired-state-with-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] + #[test] + fn utest_server_state_update_state_workload_references_removed_configs() { + let _ = env_logger::builder().is_test(true).try_init(); + let mut old_state = generate_test_old_state(); + old_state.desired_state.configs = generate_test_configs(); + + let mut updated_state = old_state.clone(); + updated_state.desired_state.configs.clear(); + + let update_mask = vec!["desiredState".to_string()]; + + let mut delete_graph_mock = MockDeleteGraph::new(); + delete_graph_mock.expect_insert().never(); + + delete_graph_mock + .expect_apply_delete_conditions_to() + .never(); + + let mut mock_config_renderer = MockConfigRenderer::new(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| { + Err(ConfigRenderError::Field( + "agent".to_string(), + "config item does not exist".to_string(), + )) + }); + + let mut server_state = ServerState { + state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), + delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, + }; + + let result = server_state.update(updated_state, update_mask); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("config item does not exist")); + + assert_eq!(old_state, server_state.state); // keep old state + } + + // [utest->swdd~update-desired-state-with-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] #[test] fn utest_server_state_update_state_remove_workload() { let old_state = generate_test_old_state(); @@ -737,9 +1092,18 @@ mod tests { 
.once() .return_const(()); + let mut mock_config_renderer = MockConfigRenderer::new(); + let cloned_new_state = expected.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| Ok(generate_rendered_workloads_from_state(&cloned_new_state))); + let mut server_state = ServerState { state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, }; server_state.update(update_state, update_mask).unwrap(); @@ -747,6 +1111,7 @@ mod tests { } // [utest->swdd~update-desired-state-with-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] #[test] fn utest_server_state_update_state_remove_non_existing_workload() { let old_state = generate_test_old_state(); @@ -761,9 +1126,22 @@ mod tests { .expect_apply_delete_conditions_to() .never(); + let mut mock_config_renderer = MockConfigRenderer::new(); + let cloned_old_state = old_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| { + Ok(generate_rendered_workloads_from_state( + &cloned_old_state.desired_state, + )) + }); + let mut server_state = ServerState { state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, }; server_state.update(update_state, update_mask).unwrap(); @@ -783,9 +1161,14 @@ mod tests { .expect_apply_delete_conditions_to() .never(); + let mut mock_config_renderer = MockConfigRenderer::new(); + mock_config_renderer.expect_render_workloads().never(); + let mut server_state = ServerState { state: old_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state(&old_state.desired_state), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, }; let result = server_state.update(update_state, update_mask); @@ -810,6 +1193,7 @@ mod tests { let mut server_state = ServerState { state: old_state.clone(), delete_graph: delete_graph_mock, + ..Default::default() }; let result = server_state.update(update_state, update_mask); assert!(result.is_err()); @@ -817,6 +1201,7 @@ mod tests { } // [utest->swdd~update-desired-state-empty-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] #[test] fn utest_server_state_update_state_no_update() { let _ = env_logger::builder().is_test(true).try_init(); @@ -827,9 +1212,16 @@ mod tests { .expect_apply_delete_conditions_to() .never(); + let mut mock_config_renderer = MockConfigRenderer::new(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(|_, _| Ok(HashMap::new())); + let mut server_state = ServerState { - state: CompleteState::default(), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, + ..Default::default() }; let added_deleted_workloads = server_state @@ -840,6 +1232,7 @@ mod tests { } // [utest->swdd~update-desired-state-empty-update-mask~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] // [utest->swdd~server-detects-new-workload~1] #[test] fn utest_server_state_update_state_new_workloads() { @@ -856,9 +1249,17 @@ mod tests { .once() .return_const(()); + let mut mock_config_renderer = MockConfigRenderer::new(); + let new_state_clone = new_state.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| 
Ok(generate_rendered_workloads_from_state(&new_state_clone))); + let mut server_state = ServerState { - state: CompleteState::default(), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, + ..Default::default() }; let added_deleted_workloads = server_state.update(new_state.clone(), update_mask).unwrap(); @@ -888,11 +1289,12 @@ mod tests { let expected_deleted_workloads: Vec = Vec::new(); assert_eq!(deleted_workloads, expected_deleted_workloads); - assert_eq!(server_state.state, new_state); + assert_eq!(server_state.state.desired_state, new_state.desired_state); } // [utest->swdd~update-desired-state-empty-update-mask~1] // [utest->swdd~server-detects-deleted-workload~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] #[test] fn utest_server_state_update_state_deleted_workloads() { let _ = env_logger::builder().is_test(true).try_init(); @@ -909,9 +1311,19 @@ mod tests { .once() .return_const(()); + let mut mock_config_renderer = MockConfigRenderer::new(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(|_, _| Ok(HashMap::new())); + let mut server_state = ServerState { state: current_complete_state.clone(), delete_graph: delete_graph_mock, + rendered_workloads: generate_rendered_workloads_from_state( + ¤t_complete_state.desired_state, + ), + config_renderer: mock_config_renderer, }; let added_deleted_workloads = server_state.update(update_state, update_mask).unwrap(); @@ -942,11 +1354,12 @@ mod tests { }); assert_eq!(deleted_workloads, expected_deleted_workloads); - assert_eq!(server_state.state, CompleteState::default()); + assert_eq!(server_state.state.desired_state, State::default()); } // [utest->swdd~update-desired-state-empty-update-mask~1] // [utest->swdd~server-detects-changed-workload~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] #[test] fn utest_server_state_update_state_updated_workload() { let _ = env_logger::builder().is_test(true).try_init(); @@ -978,9 +1391,20 @@ mod tests { .once() .return_const(()); + let mut mock_config_renderer = MockConfigRenderer::new(); + let cloned_new_state = new_complete_state.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| Ok(generate_rendered_workloads_from_state(&cloned_new_state))); + let mut server_state = ServerState { state: current_complete_state.clone(), + rendered_workloads: generate_rendered_workloads_from_state( + ¤t_complete_state.desired_state, + ), delete_graph: delete_graph_mock, + config_renderer: mock_config_renderer, }; let added_deleted_workloads = server_state @@ -1005,6 +1429,7 @@ mod tests { // [utest->swdd~server-state-stores-delete-condition~1] // [utest->swdd~server-state-adds-delete-conditions-to-deleted-workload~1] + // [utest->swdd~server-state-triggers-configuration-rendering-of-workloads~1] #[test] fn utest_server_state_update_state_store_and_add_delete_conditions() { let workload = generate_test_workload_spec_with_param( @@ -1058,13 +1483,28 @@ mod tests { .once() .return_const(()); + let mut mock_config_renderer = MockConfigRenderer::new(); + let cloned_expected_state = new_complete_state.desired_state.clone(); + mock_config_renderer + .expect_render_workloads() + .once() + .returning(move |_, _| { + Ok(generate_rendered_workloads_from_state( + &cloned_expected_state, + )) + }); + let mut server_state = ServerState { + rendered_workloads: generate_rendered_workloads_from_state( + ¤t_complete_state.desired_state, + ), state: 
             current_complete_state,
             delete_graph: delete_graph_mock,
+            config_renderer: mock_config_renderer,
         };
 
         let added_deleted_workloads = server_state
-            .update(new_complete_state.clone(), update_mask)
+            .update(new_complete_state, update_mask)
             .unwrap();
         assert!(added_deleted_workloads.is_some());
     }
diff --git a/tests/resources/ankaios.resource b/tests/resources/ankaios.resource
index 8f3e068d7..0ae60cb74 100644
--- a/tests/resources/ankaios.resource
+++ b/tests/resources/ankaios.resource
@@ -466,6 +466,13 @@ list of workloads shall be empty
     @{list_result}=    Create List
     Should Be Empty    item=${list_result}
 
+the configs field inside the state shall be empty
+    ${command_result}=    Run Command    %{ANK_BIN_DIR}${/}ank get state -o json    timeout=5
+    ${result_dict}=    Json To Dict    ${command_result.stdout}
+    ${desiredState}=    Get From Dictionary    ${result_dict}    desiredState
+    ${configs}=    Get From Dictionary    ${desiredState}    configs
+    Should Be Empty    ${configs}
+
 the container of workload "${workload_name}" shall have a different id but same configuration on the podman runtime
     ${id_changed}=    Set Variable    ${False}
     ${start_time}=    Get Time Secs
diff --git a/tests/resources/configs/invalid_templated_manifest.yaml b/tests/resources/configs/invalid_templated_manifest.yaml
new file mode 100644
index 000000000..7b379f243
--- /dev/null
+++ b/tests/resources/configs/invalid_templated_manifest.yaml
@@ -0,0 +1,13 @@
+apiVersion: v0.1
+workloads:
+  nginx:
+    agent: agent_A
+    runtime: podman
+    configs:
+      port: web_server_config
+    runtimeConfig: |
+      image: ghcr.io/eclipse-ankaios/tests/nginx:alpine-slim
+      commandOptions: [ "-p", "{{#each port}}{{this}}{{/invalid_closing}}:80"]
+configs:
+  web_server_config:
+    access_port: "8081"
diff --git a/tests/resources/configs/manifest_with_configs.yaml b/tests/resources/configs/manifest_with_configs.yaml
new file mode 100644
index 000000000..8ca9b4990
--- /dev/null
+++ b/tests/resources/configs/manifest_with_configs.yaml
@@ -0,0 +1,29 @@
+apiVersion: v0.1
+workloads:
+  nginx:
+    agent: "{{agent.agent_name}}"
+    runtime: podman
+    configs:
+      port: web_server_config
+      agent: agents
+    runtimeConfig: |
+      image: ghcr.io/eclipse-ankaios/tests/nginx:alpine-slim
+      commandOptions: [ "-p", "{{port.access_port}}:80"]
+  greeting_person:
+    agent: agent_A
+    runtime: podman
+    configs:
+      person: person
+    runtimeConfig: |
+      image: ghcr.io/eclipse-ankaios/tests/alpine:latest
+      commandArgs: [ "echo", '{{#each person}}{{#if (eq this.age "40")}}Hello {{this.name}}(age: {{this.age}})\n{{/if}}{{/each}}' ]
+configs:
+  web_server_config:
+    access_port: "8081"
+  person:
+    - name: John Doe
+      age: "30"
+    - name: Chris Smith
+      age: "40"
+  agents:
+    agent_name: agent_A
diff --git a/tests/resources/configs/manifest_with_configs_updated_config_item.yaml b/tests/resources/configs/manifest_with_configs_updated_config_item.yaml
new file mode 100644
index 000000000..c6ef205e4
--- /dev/null
+++ b/tests/resources/configs/manifest_with_configs_updated_config_item.yaml
@@ -0,0 +1,37 @@
+desiredState:
+  apiVersion: v0.1
+  workloads:
+    greeting_person:
+      agent: agent_A
+      tags: []
+      dependencies: {}
+      restartPolicy: NEVER
+      runtime: podman
+      runtimeConfig: |
+        image: ghcr.io/eclipse-ankaios/tests/alpine:latest
+        commandArgs: [ "echo", '{{#each person}}{{#if (eq this.age "40")}}Hello {{this.name}}(age: {{this.age}})\n{{/if}}{{/each}}' ]
+      configs:
+        person: person
+    nginx:
+      agent: '{{agent.agent_name}}'
+      tags: []
+      dependencies: {}
+      restartPolicy: NEVER
+      runtime: podman
+      runtimeConfig: |
+        image: ghcr.io/eclipse-ankaios/tests/nginx:alpine-slim
+        commandOptions: [ "-p", "{{port.access_port}}:80"]
+      configs:
+        agent: agents
+        port: web_server_config
+  configs:
+    agents:
+      agent_name: agent_A
+    person:
+    - age: '30'
+      name: John Doe
+    - age: '40'
+      name: Chris Smith
+    web_server_config:
+      access_port: '8082'
+
diff --git a/tests/resources/configs/update_state_invalid_config_item_key.yaml b/tests/resources/configs/update_state_invalid_config_item_key.yaml
new file mode 100644
index 000000000..0fe822fe2
--- /dev/null
+++ b/tests/resources/configs/update_state_invalid_config_item_key.yaml
@@ -0,0 +1,8 @@
+desiredState:
+  apiVersion: v0.1
+  workloads: {}
+  configs:
+    web_server_config:
+      access_port: '8085'
+    another$%invalid/config:
+      config_key: config_value
diff --git a/tests/resources/configs/update_state_invalid_workload_config_alias.yaml b/tests/resources/configs/update_state_invalid_workload_config_alias.yaml
new file mode 100644
index 000000000..5d4c58ec2
--- /dev/null
+++ b/tests/resources/configs/update_state_invalid_workload_config_alias.yaml
@@ -0,0 +1,16 @@
+desiredState:
+  apiVersion: v0.1
+  workloads:
+    workload_invalid_config_alias:
+      agent: agent_A
+      dependencies: {}
+      restartPolicy: NEVER
+      runtime: podman
+      runtimeConfig: |
+        image: ghcr.io/eclipse-ankaios/tests/alpine:latest
+        commandOptions: [ "echo", "Port: {{web_config.port}}"]
+      configs:
+        web.$_config: web_server_config
+  configs:
+    web_server_config:
+      port: '8085'
diff --git a/tests/resources/configs/update_state_invalid_workload_config_reference_key.yaml b/tests/resources/configs/update_state_invalid_workload_config_reference_key.yaml
new file mode 100644
index 000000000..3cde79640
--- /dev/null
+++ b/tests/resources/configs/update_state_invalid_workload_config_reference_key.yaml
@@ -0,0 +1,16 @@
+desiredState:
+  apiVersion: v0.1
+  workloads:
+    workload_invalid_config_reference:
+      agent: agent_A
+      dependencies: {}
+      restartPolicy: NEVER
+      runtime: podman
+      runtimeConfig: |
+        image: ghcr.io/eclipse-ankaios/tests/alpine:latest
+        commandOptions: [ "echo", "Port: {{web_config.port}}"]
+      configs:
+        web_config: web_server$%_config
+  configs:
+    web_server_config:
+      port: '8085'
diff --git a/tests/stests/manifests/apply_manifests.robot b/tests/stests/manifests/apply_manifests.robot
index da8864dbd..dbf39ccc9 100644
--- a/tests/stests/manifests/apply_manifests.robot
+++ b/tests/stests/manifests/apply_manifests.robot
@@ -161,6 +161,8 @@ Test Ankaios apply workload specifications via Ankaios Manifest files for deleti
     And the workload "nginx_from_manifest2" shall not exist within "20" seconds
     [Teardown]    Clean up Ankaios
 
+# [stest->swdd~cli-apply-send-update-state~1]
+# [stest->swdd~cli-apply-accepts-ankaios-manifest-content-from-stdin~1]
 Test Ankaios apply workload specifications via Ankaios Manifest content through stdin for deletion
     [Setup]    Run Keywords    Setup Ankaios
     ...    AND    Set Global Variable    ${simple_yaml_file}    ${CONFIGS_DIR}/simple.yaml
@@ -177,3 +179,20 @@ Test Ankaios apply workload specifications via Ankaios Manifest content through
     And the workload "nginx_from_manifest1" shall not exist within "20" seconds
     [Teardown]    Clean up Ankaios
 
+# [stest->swdd~cli-apply-send-update-state~1]
+Test Ankaios apply workload specifications in Ankaios manifest with templated fields
+    [Setup]    Run Keywords    Setup Ankaios
+
+    # Preconditions
+    # This test assumes that all containers in Podman have been created with this test -> clean it up first
+    Given Podman has deleted all existing containers
+    And Ankaios server is started without config
+    And Ankaios agent is started with name "agent_A"
+    # Actions
+    When user triggers "ank apply ${CONFIGS_DIR}/manifest_with_configs.yaml"
+    # Asserts
+    Then the last command shall finish with exit code "0"
+    And the workload "nginx" shall have the execution state "Running(Ok)" on agent "agent_A" within "20" seconds
+    And the workload "greeting_person" shall have the execution state "Succeeded(Ok)" on agent "agent_A" within "20" seconds
+    [Teardown]    Clean up Ankaios
+
diff --git a/tests/stests/workloads/update_workload_podman.robot b/tests/stests/workloads/update_workload_podman.robot
index 474e9a45b..3745d7c6e 100644
--- a/tests/stests/workloads/update_workload_podman.robot
+++ b/tests/stests/workloads/update_workload_podman.robot
@@ -77,7 +77,7 @@ Test Ankaios Podman Update workload with invalid api version
     # Actions
     When user triggers "ank -k get workloads"
     Then list of workloads shall be empty
-    When user triggers "ank -k set state ${CONFIGS_DIR}/update_state_invalid_version.yaml desiredState"
+    When user triggers "ank -k set state desiredState ${CONFIGS_DIR}/update_state_invalid_version.yaml"
     And user triggers "ank -k get workloads"
     Then list of workloads shall be empty
 
@@ -96,7 +96,7 @@ Test Ankaios Podman Update workload with invalid workload name
     # Actions
     When user triggers "ank -k get workloads"
     Then list of workloads shall be empty
-    When user triggers "ank -k set state ${CONFIGS_DIR}/update_state_invalid_names.yaml desiredState"
+    When user triggers "ank -k set state desiredState ${CONFIGS_DIR}/update_state_invalid_names.yaml"
     And user triggers "ank -k get workloads"
     Then list of workloads shall be empty
 
@@ -115,7 +115,7 @@ Test Ankaios Podman Update workload with lengthy workload name
     # Actions
     When user triggers "ank -k get workloads"
     Then list of workloads shall be empty
-    When user triggers "ank -k set state ${CONFIGS_DIR}/update_state_long_names.yaml desiredState"
+    When user triggers "ank -k set state desiredState ${CONFIGS_DIR}/update_state_long_names.yaml"
     And user triggers "ank -k get workloads"
     Then list of workloads shall be empty
 
@@ -135,7 +135,7 @@ Test Ankaios Podman Update workload with invalid agent name
     # Actions
     When user triggers "ank -k get workloads"
     Then list of workloads shall be empty
-    When user triggers "ank -k set state ${CONFIGS_DIR}/update_state_invalid_names.yaml desiredState.workloads.nginx.agent"
+    When user triggers "ank -k set state desiredState.workloads.nginx ${CONFIGS_DIR}/update_state_invalid_names.yaml"
     And user triggers "ank -k get workloads"
     Then list of workloads shall be empty
 
diff --git a/tests/stests/workloads/workload_configs_podman.robot b/tests/stests/workloads/workload_configs_podman.robot
new file mode 100644
index 000000000..d20cfbc4a
--- /dev/null
+++ b/tests/stests/workloads/workload_configs_podman.robot
@@ -0,0 +1,112 @@
+# Copyright (c) 2023 Elektrobit Automotive GmbH
+#
+# This program and the accompanying materials are made available under the
+# terms of the Apache License, Version 2.0 which is available at
+# https://www.apache.org/licenses/LICENSE-2.0.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+*** Settings ***
+Documentation       Test of different cases related to workloads and their rendered configuration.
+
+Resource            ../../resources/ankaios.resource
+Resource            ../../resources/variables.resource
+
+
+*** Variables ***
+${start_up_yaml_file}     ${EMPTY}
+${new_state_yaml_file}    ${EMPTY}
+
+
+*** Test Cases ***
+# [stest->swdd~server-state-compares-rendered-workloads~1]
+Test Ankaios start up with templated Ankaios manifest and update state with updated config item
+    [Setup]    Run Keywords    Setup Ankaios
+    ...    AND    Set Global Variable    ${start_up_yaml_file}    ${CONFIGS_DIR}/manifest_with_configs.yaml
+    ...    AND    Set Global Variable    ${new_state_yaml_file}    ${CONFIGS_DIR}/manifest_with_configs_updated_config_item.yaml
+
+    # Preconditions
+    # This test assumes that all Podman containers have been created with this test -> clean it up first
+    Given Podman has deleted all existing containers
+    And Ankaios server is started with config "${start_up_yaml_file}"
+    And Ankaios agent is started with name "agent_A"
+    And all workloads of agent "agent_A" have an initial execution state
+    And the command "curl localhost:8081" finished with exit code "0"
+    # Actions
+    When user triggers "ank -k set state desiredState.configs ${new_state_yaml_file}"
+    # Asserts
+    Then the workload "nginx" shall have the execution state "Running(Ok)" on agent "agent_A" within "20" seconds
+    And the command "curl localhost:8082" shall finish with exit code "0" within "10" seconds
+    [Teardown]    Clean up Ankaios
+
+# [stest->swdd~common-config-item-key-naming-convention~1]
+# [stest->swdd~server-naming-convention~1]
+Test Ankaios update configs with invalid config item key
+    [Setup]    Run Keywords    Setup Ankaios
+
+    # Preconditions
+    # This test assumes that all Podman containers have been created with this test -> clean it up first
+    Given Podman has deleted all existing containers
+    And Ankaios server is started without config
+    And Ankaios agent is started with name "agent_A"
+    # Actions
+    Then the configs field inside the state shall be empty
+    When user triggers "ank -k set state desiredState.configs ${CONFIGS_DIR}/update_state_invalid_config_item_key.yaml"
+    Then the configs field inside the state shall be empty
+
+    [Teardown]    Clean up Ankaios
+
+# [stest->swdd~common-config-aliases-and-config-reference-keys-naming-convention~1]
+# [stest->swdd~server-naming-convention~1]
+Test Ankaios update workload with invalid config alias
+    [Setup]    Run Keywords    Setup Ankaios
+
+    # Preconditions
+    # This test assumes that all Podman containers have been created with this test -> clean it up first
+    Given Podman has deleted all existing containers
+    And Ankaios server is started without config
+    And Ankaios agent is started with name "agent_A"
+    # Actions
+    Then the configs field inside the state shall be empty
+    When user triggers "ank -k set state desiredState ${CONFIGS_DIR}/update_state_invalid_workload_config_alias.yaml"
+    Then the configs field inside the state shall be empty
+
+    [Teardown]    Clean up Ankaios
+
+# [stest->swdd~common-config-aliases-and-config-reference-keys-naming-convention~1]
+# [stest->swdd~server-naming-convention~1]
+Test Ankaios update workload with invalid config reference key
+    [Setup]    Run Keywords    Setup Ankaios
+
+    # Preconditions
+    # This test assumes that all Podman containers have been created with this test -> clean it up first
+    Given Podman has deleted all existing containers
+    And Ankaios server is started without config
+    And Ankaios agent is started with name "agent_A"
+    # Actions
+    Then the configs field inside the state shall be empty
+    When user triggers "ank -k set state desiredState ${CONFIGS_DIR}/update_state_invalid_workload_config_reference_key.yaml"
+    Then the configs field inside the state shall be empty
+
+    [Teardown]    Clean up Ankaios
+
+# [stest->swdd~server-fails-on-invalid-startup-state~1]
+Test Ankaios start up fails with invalid templated Ankaios manifest
+    [Setup]    Run Keywords    Setup Ankaios
+
+    # Preconditions
+    # This test assumes that all Podman containers have been created with this test -> clean it up first
+    Given Podman has deleted all existing containers
+    # Actions
+    # Manifest contains invalid template string syntax
+    And Ankaios server is started with an invalid config "${CONFIGS_DIR}/invalid_templated_manifest.yaml"
+    # Asserts
+    Then the Ankaios server shall exit with an error code
+    [Teardown]    Clean up Ankaios
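Note on the templating exercised by the manifests above: the dependency list in this change set pulls in the `handlebars` crate, and the sketch below shows roughly how a workload's templated `runtimeConfig` can be rendered against its referenced config items, and why a mismatched closing block (as in invalid_templated_manifest.yaml) is rejected. This is an illustrative sketch only; the data layout, the strict-mode choice, and the standalone `main` are assumptions for the example and are not a reproduction of the Ankaios config renderer.

// Illustrative sketch, not the Ankaios ConfigRenderer; only plain handlebars/serde_json usage.
use handlebars::Handlebars;
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut handlebars = Handlebars::new();
    // Assumption for this sketch: unresolved references should fail instead of rendering as "".
    handlebars.set_strict_mode(true);

    // Config items keyed by the aliases used in the workload's `configs:` mapping,
    // mirroring manifest_with_configs.yaml.
    let data = json!({
        "port": { "access_port": "8081" },
        "agent": { "agent_name": "agent_A" },
    });

    // Render the templated runtimeConfig of the `nginx` workload.
    let runtime_config = concat!(
        "image: ghcr.io/eclipse-ankaios/tests/nginx:alpine-slim\n",
        "commandOptions: [ \"-p\", \"{{port.access_port}}:80\"]\n",
    );
    println!("{}", handlebars.render_template(runtime_config, &data)?);

    // A mismatched closing block, as used in invalid_templated_manifest.yaml,
    // fails to render; this is the error path the invalid-startup test relies on.
    let invalid = "{{#each port}}{{this}}{{/invalid_closing}}:80";
    assert!(handlebars.render_template(invalid, &data).is_err());
    Ok(())
}

The sketch stops at rendering; per the swdd~server-state-compares-rendered-workloads~1 tag referenced in the new system test, the server additionally compares rendered workloads when a config item changes, which is what the port 8081 -> 8082 update scenario above verifies end to end.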