diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..560e295 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,8 @@ +[build] +target = "x86_64-unknown-linux-musl" + +[target.aarch64-unknown-linux-musl] +linker="aarch64-linux-gnu-gcc" + +[target.x86_64-unknown-linux-musl] +linker="x86_64-linux-gnu-gcc" diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000..4c8abcd --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,24 @@ +# Copyright (c) 2024 Elektrobit Automotive GmbH +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +FROM ghcr.io/eclipse-ankaios/devcontainer-base:0.10.2 + +ARG USERNAME=vscode + +# add some convenience aliases +RUN echo 'alias ll="ls -la"' | tee -a /home/${USERNAME}/.bashrc /home/${USERNAME}/.zshrc +RUN echo 'alias ..="cd .."' | tee -a /home/${USERNAME}/.bashrc /home/${USERNAME}/.zshrc +RUN echo 'alias ...="cd ../.."' | tee -a /home/${USERNAME}/.bashrc /home/${USERNAME}/.zshrc + +USER ${USERNAME} diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..31854cf --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,53 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/rust +{ + "name": "Ankaios sdk dev", + "build": { + "dockerfile": "Dockerfile" + }, + "mounts": [ + ], + "runArgs": ["--privileged"], + "customizations": { + "vscode": { + // Set *default* container specific settings.json values on container create. + "settings": { + "lldb.executable": "/usr/bin/lldb", + // VS Code shouldn't watch files under ./target + "files.watcherExclude": { + "**/target/**": true + }, + "rust-analyzer.checkOnSave.command": "clippy" + }, + // Add the IDs of extensions you want installed when the container is created.
+ "extensions": [ + "vadimcn.vscode-lldb", + "mutantdino.resourcemonitor", + "rust-lang.rust-analyzer", + "tamasfe.even-better-toml", + "timonwong.shellcheck", + "eamodio.gitlens", + "streetsidesoftware.code-spell-checker", + // "jebbs.plantuml", + // "ms-python.python", + // "ms-python.vscode-pylance", + // "yzhang.markdown-all-in-one", + // "zxh404.vscode-proto3", + // "bierner.markdown-preview-github-styles", + // "hediet.vscode-drawio", + // "redhat.vscode-yaml", + // "bierner.markdown-mermaid", + // "d-biehl.robotcode", + // "bianxianyang.htmlplay", + // "DavidAnson.vscode-markdownlint", + "EditorConfig.EditorConfig", + "ms-vsliveshare.vsliveshare", + "BarbossHack.crates-io", + "PKief.material-icon-theme" + ] + } + }, + "workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/ank-rust/,type=bind", + "workspaceFolder": "/workspaces/ank-rust/", + "remoteUser": "vscode" +} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..cb117fd --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +Cargo.lock +target \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..6614a03 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,9 @@ +{ + "cSpell.words": [ + "Elektrobit", + "Ankaios", + "proto", + "utest", + "substate", + ] +} \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..45e68dd --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,94 @@ +# Community Code of Conduct + +**Version 2.0 +January 1, 2023** + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as community members, contributors, Committers[^1], and Project Leads (collectively "Contributors") pledge to make participation in our projects and our community a harassment-free and inclusive experience for everyone. + +This Community Code of Conduct ("Code") outlines our behavior expectations as members of our community in all Eclipse Foundation activities, both offline and online. It is not intended to govern scenarios or behaviors outside of the scope of Eclipse Foundation activities. Nor is it intended to replace or supersede the protections offered to all our community members under the law. Please follow both the spirit and letter of this Code and encourage other Contributors to follow these principles into our work. Failure to read or acknowledge this Code does not excuse a Contributor from compliance with the Code. + +## Our Standards + +Examples of behavior that contribute to creating a positive and professional environment include: + +- Using welcoming and inclusive language; +- Actively encouraging all voices; +- Helping others bring their perspectives and listening actively. 
If you find yourself dominating a discussion, it is especially important to encourage other voices to join in; +- Being respectful of differing viewpoints and experiences; +- Gracefully accepting constructive criticism; +- Focusing on what is best for the community; +- Showing empathy towards other community members; +- Being direct but professional; and +- Leading by example by holding yourself and others accountable + +Examples of unacceptable behavior by Contributors include: + +- The use of sexualized language or imagery; +- Unwelcome sexual attention or advances; +- Trolling, insulting/derogatory comments, and personal or political attacks; +- Public or private harassment, repeated harassment; +- Publishing others' private information, such as a physical or electronic address, without explicit permission; +- Violent threats or language directed against another person; +- Sexist, racist, or otherwise discriminatory jokes and language; +- Posting sexually explicit or violent material; +- Sharing private content, such as emails sent privately or non-publicly, or unlogged forums such as IRC channel history; +- Personal insults, especially those using racist or sexist terms; +- Excessive or unnecessary profanity; +- Advocating for, or encouraging, any of the above behavior; and +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +With the support of the Eclipse Foundation employees, consultants, officers, and directors (collectively, the "Staff"), Committers, and Project Leads, the Eclipse Foundation Conduct Committee (the "Conduct Committee") is responsible for clarifying the standards of acceptable behavior. The Conduct Committee takes appropriate and fair corrective action in response to any instances of unacceptable behavior. + +## Scope + +This Code applies within all Project, Working Group, and Interest Group spaces and communication channels of the Eclipse Foundation (collectively, "Eclipse spaces"), within any Eclipse-organized event or meeting, and in public spaces when an individual is representing an Eclipse Foundation Project, Working Group, Interest Group, or their communities. Examples of representing a Project or community include posting via an official social media account, personal accounts, or acting as an appointed representative at an online or offline event. Representation of Projects, Working Groups, and Interest Groups may be further defined and clarified by Committers, Project Leads, or the Staff. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the Conduct Committee via .org. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. Without the explicit consent of the reporter, the Conduct Committee is obligated to maintain confidentiality with regard to the reporter of an incident. The Conduct Committee is further obligated to ensure that the respondent is provided with sufficient information about the complaint to reply. If such details cannot be provided while maintaining confidentiality, the Conduct Committee will take the respondent‘s inability to provide a defense into account in its deliberations and decisions. Further details of enforcement guidelines may be posted separately. 
+ +Staff, Committers and Project Leads have the right to report, remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code, or to block temporarily or permanently any Contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. Any such actions will be reported to the Conduct Committee for transparency and record keeping. + +Any Staff (including officers and directors of the Eclipse Foundation), Committers, Project Leads, or Conduct Committee members who are the subject of a complaint to the Conduct Committee will be recused from the process of resolving any such complaint. + +## Responsibility + +The responsibility for administering this Code rests with the Conduct Committee, with oversight by the Executive Director and the Board of Directors. For additional information on the Conduct Committee and its process, please write to . + +## Investigation of Potential Code Violations + +All conflict is not bad as a healthy debate may sometimes be necessary to push us to do our best. It is, however, unacceptable to be disrespectful or offensive, or violate this Code. If you see someone engaging in objectionable behavior violating this Code, we encourage you to address the behavior directly with those involved. If for some reason, you are unable to resolve the matter or feel uncomfortable doing so, or if the behavior is threatening or harassing, please report it following the procedure laid out below. + +Reports should be directed to . It is the Conduct Committee’s role to receive and address reported violations of this Code and to ensure a fair and speedy resolution. + +The Eclipse Foundation takes all reports of potential Code violations seriously and is committed to confidentiality and a full investigation of all allegations. The identity of the reporter will be omitted from the details of the report supplied to the accused. Contributors who are being investigated for a potential Code violation will have an opportunity to be heard prior to any final determination. Those found to have violated the Code can seek reconsideration of the violation and disciplinary action decisions. Every effort will be made to have all matters disposed of within 60 days of the receipt of the complaint. + +## Actions + +Contributors who do not follow this Code in good faith may face temporary or permanent repercussions as determined by the Conduct Committee. + +This Code does not address all conduct. It works in conjunction with our [Communication Channel Guidelines](https://www.eclipse.org/org/documents/communication-channel-guidelines/), [Social Media Guidelines](https://www.eclipse.org/org/documents/social_media_guidelines.php), [Bylaws](https://www.eclipse.org/org/documents/eclipse-foundation-be-bylaws-en.pdf), and [Internal Rules](https://www.eclipse.org/org/documents/ef-be-internal-rules.pdf) which set out additional protections for, and obligations of, all contributors. The Foundation has additional policies that provide further guidance on other matters. + +It’s impossible to spell out every possible scenario that might be deemed a violation of this Code. Instead, we rely on one another’s good judgment to uphold a high standard of integrity within all Eclipse Spaces. Sometimes, identifying the right thing to do isn’t an easy call. In such a scenario, raise the issue as early as possible. 
+ +## No Retaliation + +The Eclipse community relies upon and values the help of Contributors who identify potential problems that may need to be addressed within an Eclipse Space. Any retaliation against a Contributor who raises an issue honestly is a violation of this Code. That a Contributor has raised a concern honestly or participated in an investigation, cannot be the basis for any adverse action, including threats, harassment, or discrimination. If you work with someone who has raised a concern or provided information in an investigation, you should continue to treat the person with courtesy and respect. If you believe someone has retaliated against you, report the matter as described by this Code. Honest reporting does not mean that you have to be right when you raise a concern; you just have to believe that the information you are providing is accurate. + +False reporting, especially when intended to retaliate or exclude, is itself a violation of this Code and will not be accepted or tolerated. + +Everyone is encouraged to ask questions about this Code. Your feedback is welcome, and you will get a response within three business days. Write to . + +## Amendments + +The Eclipse Foundation Board of Directors may amend this Code from time to time and may vary the procedures it sets out where appropriate in a particular case. + +### Attribution + +This Code was inspired by the [Contributor Covenant](https://www.contributor-covenant.org/), version 1.4, available [here](https://www.contributor-covenant.org/version/1/4/code-of-conduct/). + +[^1]: Capitalized terms used herein without definition shall have the meanings assigned to them in the Bylaws. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..21624f4 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing + +Welcome to the Ankaios community. Start here for info on how to contribute and help improve our project. +Please observe our [Community Code of Conduct](./CODE_OF_CONDUCT.md). + +## How to Contribute + +This project welcomes contributions and suggestions. +You'll also need to create an [Eclipse Foundation account](https://accounts.eclipse.org/) and agree to the [Eclipse Contributor Agreement](https://www.eclipse.org/legal/ECA.php). See more info at . + +If you have a bug to report or a feature to suggest, please use the New Issue button on the Issues page to access templates for these items. + +Code contributions are to be submitted via pull requests. +For this fork this repository, apply the suggested changes and create a +pull request to integrate them. +Before creating the request, please ensure the following which we will check +besides a technical review: + +- **No breaks**: All builds and tests pass (GitHub actions). +- **Docs updated**: Make sure any changes and additions are appropriately included into the design and user documentation. +- **Requirements**: Make sure that requirements are created for new features and those are [traced in the code and tests](https://eclipse-ankaios.github.io/ankaios/main/development/requirement-tracing/). +- **Rust coding guidelines**: Make sure to follow the [Rust coding guidelines for this project](https://eclipse-ankaios.github.io/ankaios/main/development/rust-coding-guidelines/). +- **Unit verification strategy**: Unit tests have been created according to the [unit verification strategy](https://eclipse-ankaios.github.io/ankaios/main/development/unit-verification/). 
+ +## Communication + +Please join our [developer mailing list](https://accounts.eclipse.org/mailing-list/ankaios-dev) for up to date information or use the Ankaios [discussion forum](https://github.com/eclipse-ankaios/ankaios/discussions). diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..cf73435 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "ankaios_sdk" +version = "0.5.0-rc1" +edition = "2021" +license = "Apache-2.0" +authors = ["Elektrobit Automotive GmbH and Ankaios contributors"] +description = "Eclipse Ankaios Rust SDK - provides a convenient Rust interface for interacting with the Ankaios platform." +documentation = "https://eclipse-ankaios.github.io/ankaios" # TODO +repository = "https://github.com/eclipse-ankaios/ank-sdk-rust" +readme = "README.md" +keywords = ["ankaios", "eclipse", "elektrobit", "automotive", "sdk", "rust"] +categories = [] # https://crates.io/categories +exclude = ["/tests", "/.github"] + +[dependencies] +api = { git = "https://github.com/eclipse-ankaios/ankaios.git", tag = "v0.5.0", subdir = "api" } +prost = "0.11" +tonic = "0.9" +log = "0.4" +serde = { version = "1.0", features = ["derive"] } +serde_yaml = "0.9" +thiserror = "1.0" + +[build-dependencies] +tonic-build = "0.9" + +[features] +default = [] +test_utils = [] \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index d9db0f4..19b286f 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,29 @@ -# ank-sdk-rust -Eclipse Ankaios Rust SDK - provides a convenient Rust interface for interacting with the Ankaios platform. + + + + Shows Ankaios logo + + +# Ankaios Rust SDK for Eclipse Ankaios + +Eclipse Ankaios provides workload and container orchestration for automotive +High Performance Computing Software (HPCs). While it can be used for various +fields of applications, it is developed from scratch for automotive use cases +and provides a slim yet powerful solution to manage containerized applications. + +The Rust SDK provides easy access from the container (workload) point-of-view +to manage the Ankaios system. A workload can use the Rust SDK to run other workloads +and get the state of the Ankaios system. + +## Installation + +## Usage + +## Contributing + +This project welcomes contributions and suggestions. 
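As a starting point for the Usage section above, here is a minimal, hypothetical sketch of how the pieces introduced in this change set fit together. It assumes the crate is consumed under the package name `ankaios_sdk` from Cargo.toml and that `Manifest`, `CompleteState`, and `AnkaiosError` are re-exported at the crate root (the `lib.rs` is not part of this excerpt); communication with a running Ankaios cluster is not covered here.

```rust
// Hedged usage sketch only: the import paths and re-exports are assumptions,
// since lib.rs is not shown in this change set.
use ankaios_sdk::{AnkaiosError, CompleteState, Manifest};

fn main() -> Result<(), AnkaiosError> {
    // Parse an Ankaios manifest; Manifest::check requires apiVersion plus
    // runtime, runtimeConfig and agent for every workload.
    let manifest = Manifest::from_string(
        r#"apiVersion: v0.1
workloads:
  nginx_test:
    runtime: podman
    agent: agent_A
    runtimeConfig: |
      image: image/test
"#,
    )?;

    // Build the desired-state representation and inspect it.
    let complete_state = CompleteState::try_from(&manifest)?;
    println!("apiVersion: {}", complete_state.get_api_version());
    println!("workloads in desired state: {}", complete_state.get_workloads().len());
    Ok(())
}
```

The manifest literal mirrors the test manifest in `src/components/manifest.rs`, so nothing beyond the APIs visible in this diff is assumed.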
Before contributing, make sure to read the +[contribution guideline](CONTRIBUTING.md). + +## License + +Ankaios Rust SDK is licensed using the Apache License Version 2.0. \ No newline at end of file diff --git a/justfile b/justfile new file mode 100644 index 0000000..b9a5259 --- /dev/null +++ b/justfile @@ -0,0 +1,62 @@ +#!/bin/bash + +# List all available commands +help: + just -l + +# Build SDK +build: + cargo build + +# Build SDK in release mode +build-release: + cargo build --release + +# Clean the build directory +clean: + cargo clean + rm -rf build + +# Run unit tests +utest: + cargo test + +# Run code coverage +cov: + cargo llvm-cov + +# Generate code coverage HTML +cov-html: + cargo llvm-cov --html + +# Open code coverage HTML +cov-open: + python3 -m http.server -d target/llvm-cov/html 8000 + +# Open code coverage HTML as a server, depending on the method +# cov-open method="server": +# if [[ "{{method}}" == "server" ]]; then +# if command -v python3 > /dev/null; then +# python3 -m http.server -d target/llvm-cov/html 8000 +# else +# echo "Error: python3 is not available for running the server." +# exit 1 +# fi +# elif [[ "{{method}}" == "xdg" ]]; then +# if command -v xdg-open > /dev/null; then +# xdg-open target/llvm-cov/html/index.html +# else +# echo "Error: xdg-open is not available." +# exit 1 +# fi +# elif [[ "{{method}}" == "open" ]]; then +# if command -v open > /dev/null; then +# open target/llvm-cov/html/index.html +# else +# echo "Error: open is not available." +# exit 1 +# fi +# else +# echo "Error: Invalid method. Use 'server', 'xdg', or 'open'." +# exit 1 +# fi diff --git a/src/ankaios.rs b/src/ankaios.rs new file mode 100644 index 0000000..bc7eb90 --- /dev/null +++ b/src/ankaios.rs @@ -0,0 +1,54 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +use crate::AnkaiosError; + + +pub struct Ankaios{ + // TODO +} + +impl Ankaios { + pub fn new() -> Self { + Self{} + } + + pub fn print(&self) { + println!("Hello from Ankaios"); + } +} + +impl Default for Ankaios { + fn default() -> Self { + Self::new() + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use super::Ankaios; + + #[test] + fn test_ankaios() { + let _ = Ankaios::new(); + } +} \ No newline at end of file diff --git a/src/components/complete_state.rs b/src/components/complete_state.rs new file mode 100644 index 0000000..2ad17d4 --- /dev/null +++ b/src/components/complete_state.rs @@ -0,0 +1,393 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. 
+// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +use std::fmt; +use std::collections::HashMap; + +pub use api::ank_base; +use crate::components::workload_mod::Workload; +use crate::components::workload_state_mod::WorkloadStateCollection; +use crate::components::manifest::Manifest; +use crate::AnkaiosError; + +const SUPPORTED_API_VERSION: &str = "v0.1"; + +#[derive(Debug, Default)] +pub struct CompleteState{ + complete_state: ank_base::CompleteState, + workloads: Vec<Workload>, + workload_state_collection: WorkloadStateCollection, + configs: HashMap<String, serde_yaml::Value> +} + +impl CompleteState { + pub fn new() -> Self { + let mut obj = Self::default(); + obj.set_api_version(SUPPORTED_API_VERSION.to_string()); + obj + } + + pub fn new_from_manifest(manifest: &Manifest) -> Self { + let dict_state = manifest.to_dict(); + let mut obj = Self::new(); + obj.set_api_version(dict_state.get("apiVersion").unwrap().as_str().unwrap()); + if let Some(workloads) = dict_state.get("workloads") { + for (workload_name, workload) in workloads.as_mapping().unwrap().iter() { + let workload = workload.as_mapping().unwrap(); + let workload = Workload::new_from_dict(workload_name.as_str().unwrap(), workload.clone()); + obj.add_workload(workload.unwrap()); + } + } + if let Some(configs) = dict_state.get("configs") { + let mut config_map = HashMap::new(); + for (k, v) in configs.as_mapping().unwrap().iter() { + config_map.insert(k.as_str().unwrap().to_string(), v.clone()); + } + obj.set_configs(config_map); + } + obj + } + + pub fn new_from_proto(proto: ank_base::CompleteState) -> Self { + let mut obj = Self::new(); + obj.complete_state = proto.clone(); + + fn from_config_item(config_item: &ank_base::ConfigItem) -> serde_yaml::Value { + match &config_item.config_item { + Some(ank_base::config_item::ConfigItem::String(val)) => serde_yaml::Value::String( + val.clone() + ), + Some(ank_base::config_item::ConfigItem::Array(val)) => serde_yaml::Value::Sequence( + val.values + .iter() + .map(from_config_item) + .collect() + ), + Some(ank_base::config_item::ConfigItem::Object(val)) => serde_yaml::Value::Mapping( + val.fields + .iter() + .map(|(k, v)| ( + serde_yaml::Value::String(k.clone()), from_config_item(v)) + ) + .collect() + ), + None => serde_yaml::Value::Null, + } + } + if let Some(configs) = proto.desired_state.as_ref().unwrap().configs.as_ref() { + obj.configs = configs.configs.iter().map(|(k, v)| (k.clone(), from_config_item(v))).collect(); + } + + if let Some(workloads) = proto.desired_state.as_ref().unwrap().workloads.as_ref() { + for (workload_name, workload) in workloads.workloads.iter() { + obj.workloads.push(Workload::new_from_proto(workload_name, workload.clone())); + } + } + + if let Some(workload_states) = proto.workload_states.as_ref() { + obj.workload_state_collection = WorkloadStateCollection::new_from_proto(workload_states); + } + obj + } + + pub fn to_dict(&self) -> serde_yaml::Mapping { + let mut dict = serde_yaml::Mapping::new(); + dict.insert(serde_yaml::Value::String("apiVersion".to_string()), serde_yaml::Value::String(self.get_api_version())); + let mut workloads = serde_yaml::Mapping::new(); + for workload in self.get_workloads() { + workloads.insert(serde_yaml::Value::String(workload.name.clone()), serde_yaml::Value::Mapping(workload.to_dict())); + } + dict.insert(serde_yaml::Value::String("workloads".to_string()), serde_yaml::Value::Mapping(workloads)); + let mut configs = serde_yaml::Mapping::new(); + for (k, v) in self.get_configs() { + configs.insert(serde_yaml::Value::String(k), v); + } + dict.insert(serde_yaml::Value::String("configs".to_string()), serde_yaml::Value::Mapping(configs)); + let mut agents = serde_yaml::Mapping::new(); + for (agent_name, agent) in self.get_agents() { + let mut agent_dict = serde_yaml::Mapping::new(); + for (k, v) in agent { + agent_dict.insert(serde_yaml::Value::String(k), serde_yaml::Value::String(v)); + } + agents.insert(serde_yaml::Value::String(agent_name), serde_yaml::Value::Mapping(agent_dict)); + } + dict.insert(serde_yaml::Value::String("agents".to_string()), serde_yaml::Value::Mapping(agents)); + dict.insert(serde_yaml::Value::String("workload_states".to_string()), serde_yaml::Value::Mapping(self.workload_state_collection.get_as_dict())); + dict + } + + pub fn to_proto(&self) -> ank_base::CompleteState { + self.complete_state.clone() + } + + fn set_api_version<T: Into<String>>(&mut self, api_version: T) { + if self.complete_state.desired_state.is_none() { + self.complete_state.desired_state = Some(ank_base::State{ + api_version: api_version.into(), + workloads: None, + configs: None, + }); + } + else { + self.complete_state.desired_state.as_mut().unwrap().api_version = api_version.into(); + } + } + + pub fn get_api_version(&self) -> String { + self.complete_state.desired_state.as_ref().unwrap().api_version.clone() + } + + pub fn add_workload(&mut self, workload: Workload) { + self.workloads.push(workload.clone()); + if self.complete_state.desired_state.as_mut().unwrap().workloads.is_none() { + self.complete_state.desired_state.as_mut().unwrap().workloads = Some(ank_base::WorkloadMap{ + workloads: Default::default(), + }); + } + self.complete_state.desired_state.as_mut().unwrap().workloads.as_mut().unwrap().workloads.insert(workload.name.clone(), workload.to_proto()); + } + + pub fn get_workload<T: Into<String>>(&self, workload_name: T) -> Option<Workload> { + let workload_name = workload_name.into(); + self.workloads + .iter() + .find(|workload| workload.name == workload_name) + .cloned() + } + + pub fn get_workloads(&self) -> Vec<Workload> { + self.workloads.clone() + } + + pub fn get_workload_states(&self) -> &WorkloadStateCollection { + &self.workload_state_collection + } + + pub fn get_agents(&self) -> HashMap<String, HashMap<String, String>> { + let mut agents = HashMap::new(); + if let Some(agent_map) = &self.complete_state.agents { + for (name, attributes) in agent_map.agents.iter() { + let mut agent = HashMap::new(); + if let Some(cpu_usage) = &attributes.cpu_usage { + agent.insert("cpu_usage".to_string(), cpu_usage.cpu_usage.to_string()); + } + if let Some(free_memory) = &attributes.free_memory { + agent.insert("free_memory".to_string(), free_memory.free_memory.to_string()); + } + agents.insert(name.clone(), agent); + } + } + agents + } + + pub fn set_configs(&mut self, configs: HashMap<String, serde_yaml::Value>) { + self.configs = configs; + + fn to_config_item(value: &serde_yaml::Value) -> ank_base::ConfigItem { + match value { + serde_yaml::Value::String(val) => ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::String(val.clone())), + }, + serde_yaml::Value::Sequence(val) => ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::Array(ank_base::ConfigArray { + values: val.iter().map(to_config_item).collect(), + })), + }, + serde_yaml::Value::Mapping(val) => ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::Object(ank_base::ConfigObject { + fields: val.iter().map(|(k, v)| (k.as_str().unwrap().to_string(), to_config_item(v))).collect(), + })), + }, + _ => ank_base::ConfigItem { + config_item: None, + }, + } + } + + if self.complete_state.desired_state.as_mut().unwrap().configs.is_none() { + self.complete_state.desired_state.as_mut().unwrap().configs = Some(ank_base::ConfigMap { + configs: Default::default(), + }); + } + self.complete_state.desired_state.as_mut().unwrap().configs.as_mut().unwrap().configs = self.configs.iter().map(|(k, v)| (k.clone(), to_config_item(v))).collect(); + } + + pub fn get_configs(&self) -> HashMap<String, serde_yaml::Value> { + self.configs.clone() + } +} + +impl fmt::Display for CompleteState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.to_proto()) + } +} + +impl TryFrom<&Manifest> for CompleteState { + type Error = AnkaiosError; + + fn try_from(manifest: &Manifest) -> Result<Self, Self::Error> { + Ok(Self::new_from_manifest(manifest)) + } +} + +impl TryFrom<ank_base::CompleteState> for CompleteState { + type Error = AnkaiosError; + + fn try_from(proto: ank_base::CompleteState) -> Result<Self, Self::Error> { + Ok(Self::new_from_proto(proto)) + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "test_utils", test))] +use crate::components::workload_mod::test_helpers::generate_test_workload_proto; + +#[cfg(any(feature = "test_utils", test))] +use crate::components::workload_state_mod::generate_test_workload_states_proto; + +#[cfg(any(feature = "test_utils", test))] +fn generate_test_configs_proto() -> ank_base::ConfigMap { + ank_base::ConfigMap { configs: HashMap::from([ + ("config1".to_string(), ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::String("value1".to_string())), + }), + ("config2".to_string(), ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::Array(ank_base::ConfigArray { + values: vec![ + ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::String("value2".to_string())), + }, + ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::String("value3".to_string())), + }, + ], + })), + }), + ("config3".to_string(), ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::Object(ank_base::ConfigObject { + fields: HashMap::from([ + ("field1".to_string(), ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::String("value4".to_string())), + }), + ("field2".to_string(), ank_base::ConfigItem { + config_item: Some(ank_base::config_item::ConfigItem::String("value5".to_string())), + }), + ]), + })), + }), + ])} +} + +#[cfg(any(feature = "test_utils", test))] +fn generate_agents_proto() -> ank_base::AgentMap { + ank_base::AgentMap { agents: HashMap::from([ + ("agent_A".to_string(), ank_base::AgentAttributes { + cpu_usage: Some(ank_base::CpuUsage { + cpu_usage: 50, + }), + free_memory: Some(ank_base::FreeMemory { + free_memory: 1024, + }), + }), + ])} +} + +#[cfg(any(feature = "test_utils", test))] +fn generate_complete_state_proto() -> ank_base::CompleteState { + ank_base::CompleteState { + desired_state: Some(ank_base::State { + api_version: SUPPORTED_API_VERSION.to_string(), + workloads: Some(ank_base::WorkloadMap { + workloads:
HashMap::from([ + ("nginx_test".to_string(), generate_test_workload_proto("agent_A", "podman")), + ]), + }), + configs: Some(generate_test_configs_proto()), + }), + workload_states: Some(generate_test_workload_states_proto()), + agents: Some(generate_agents_proto()), + } +} + +#[cfg(test)] +mod tests { + use std::any::Any; + + use super::{generate_complete_state_proto, CompleteState, SUPPORTED_API_VERSION}; + use crate::components::manifest::generate_test_manifest; + use crate::components::workload_state_mod::WorkloadInstanceName; + + #[test] + fn test_api_version() { + let mut complete_state = CompleteState::new(); + assert_eq!(complete_state.get_api_version(), SUPPORTED_API_VERSION); + complete_state.set_api_version("v0.2"); + assert_eq!(complete_state.get_api_version(), "v0.2"); + } + + #[test] + fn test_proto() { + let complete_state = CompleteState::new_from_proto(generate_complete_state_proto()); + let other_complete_state = CompleteState::new_from_proto(complete_state.to_proto()); + assert_eq!(complete_state.to_string(), other_complete_state.to_string()); + } + + #[test] + fn test_from_manifest() { + let manifest = generate_test_manifest(); + let complete_state = CompleteState::try_from(&manifest).unwrap(); + assert_eq!(complete_state.get_workloads().len(), 1); + assert_eq!(complete_state.get_configs().len(), 3); + assert_eq!(manifest.to_dict().get("workloads").unwrap(), complete_state.to_dict().get("workloads").unwrap()); + assert_eq!(manifest.to_dict().get("configs").unwrap(), complete_state.to_dict().get("configs").unwrap()); + } + + #[test] + fn test_to_dict() { + let complete_state = CompleteState::try_from(generate_complete_state_proto()).unwrap(); + + // Populate the expected mapping + let mut expected_mapping = serde_yaml::Mapping::new(); + expected_mapping.insert(serde_yaml::Value::String("apiVersion".to_string()), serde_yaml::Value::String(SUPPORTED_API_VERSION.to_string())); + let mut workloads = serde_yaml::Mapping::new(); + workloads.insert(serde_yaml::Value::String("nginx_test".to_string()), serde_yaml::Value::Mapping(complete_state.get_workloads()[0].to_dict())); + // TODO + + assert_eq!(complete_state.to_dict().type_id(), expected_mapping.type_id()); + //assert_eq!(complete_state.to_dict(), expected_mapping); + } + + #[test] + fn test_get_workload() { + let complete_state = CompleteState::try_from(generate_complete_state_proto()).unwrap(); + let workload = complete_state.get_workload("nginx_test").unwrap(); + assert_eq!(workload.name, "nginx_test"); + } + + #[test] + fn test_get_workload_states() { + let complete_state = CompleteState::try_from(generate_complete_state_proto()).unwrap(); + let workload_states = complete_state.get_workload_states(); + let workload_instance_name = WorkloadInstanceName::new("agent_A".to_string(), "nginx".to_string(), "1234".to_string()); + assert!(workload_states.get_for_instance_name(&workload_instance_name).is_some()); + } +} \ No newline at end of file diff --git a/src/components/control_interface.rs b/src/components/control_interface.rs new file mode 100644 index 0000000..0778a09 --- /dev/null +++ b/src/components/control_interface.rs @@ -0,0 +1,75 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. 
+// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +use crate::AnkaiosError; + + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ControlInterfaceState { + Initialized = 0, + Terminated = 1, + AgentDisconnected = 2, + ConnectionClosed = 3, +} + +pub struct ControlInterface{ + // TODO +} + +impl std::fmt::Display for ControlInterfaceState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let res_str = match self { + ControlInterfaceState::Initialized => "Initialized", + ControlInterfaceState::Terminated => "Terminated", + ControlInterfaceState::AgentDisconnected => "AgentDisconnected", + ControlInterfaceState::ConnectionClosed => "ConnectionClosed", + }; + write!(f, "{}", res_str) + } +} + +impl ControlInterface { + pub fn new() -> Self { + Self{} + } + + pub fn print(&self) { + println!("I need to be implemented!!"); + } +} + +impl Default for ControlInterface { + fn default() -> Self { + Self::new() + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use super::ControlInterface; + + #[test] + fn test_control_interface() { + let _ = ControlInterface::new(); + } +} \ No newline at end of file diff --git a/src/components/manifest.rs b/src/components/manifest.rs new file mode 100644 index 0000000..8b1e00b --- /dev/null +++ b/src/components/manifest.rs @@ -0,0 +1,188 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +use std::path::Path; +use serde_yaml; +use crate::AnkaiosError; + +// Disable this from coverage +// https://github.com/rust-lang/rust/issues/84605 +#[cfg(not(test))] +fn read_file_to_string(path: &Path) -> Result<String, std::io::Error> { + std::fs::read_to_string(path) +} + +#[cfg(test)] +use self::read_to_string_mock as read_file_to_string; + +#[derive(Debug)] +pub struct Manifest{ + manifest: serde_yaml::Value +} + +impl Manifest { + pub fn new(manifest: serde_yaml::Value) -> Result<Self, AnkaiosError> { + let obj = Self{manifest}; + if !obj.check() { + return Err(AnkaiosError::InvalidManifestError("Manifest is not valid".to_string())); + } + Ok(obj) + } + + pub fn from_dict(manifest: serde_yaml::Value) -> Result<Self, AnkaiosError> { + Self::new(manifest) + } + + pub fn from_string<T: Into<String>>(manifest: T) -> Result<Self, AnkaiosError> { + match serde_yaml::from_str(&manifest.into()) { + Ok(manifest) => Self::from_dict(manifest), + Err(e) => Err(AnkaiosError::InvalidManifestError(e.to_string())) + } + } + + pub fn from_file(path: &Path) -> Result<Self, AnkaiosError> { + match read_file_to_string(path) { + Ok(content) => Self::from_string(content), + Err(e) => Err(AnkaiosError::InvalidManifestError(e.to_string())) + } + } + + pub fn check(&self) -> bool { + if self.manifest.get("apiVersion").is_none() { + return false; + } + let allowed_fields = [ + "runtime", "agent", "restartPolicy", "runtimeConfig", + "dependencies", "tags", "controlInterfaceAccess", "configs" + ]; + let mandatory_fields = ["runtime", "runtimeConfig", "agent"]; + for wl_name in self.manifest["workloads"].as_mapping().unwrap_or(&serde_yaml::Mapping::default()).keys() { + for field in self.manifest["workloads"][wl_name].as_mapping().unwrap_or(&serde_yaml::Mapping::default()).keys() { + if !allowed_fields.contains(&field.as_str().unwrap()) { + return false; + } + } + for field in mandatory_fields.iter() { + if self.manifest["workloads"][wl_name].get(field).is_none() { + return false; + } + } + } + true + } + + fn calculate_masks(&self) -> Vec<String> { + let mut masks = vec![]; + for wl_name in self.manifest["workloads"].as_mapping().unwrap_or(&serde_yaml::Mapping::default()).keys() { + masks.push(format!("desiredState.workloads.{}", wl_name.as_str().unwrap())); + } + for config_name in self.manifest["configs"].as_mapping().unwrap_or(&serde_yaml::Mapping::default()).keys() { + masks.push(format!("desiredState.configs.{}", config_name.as_str().unwrap())); + } + masks + } + + pub fn to_dict(&self) -> serde_yaml::Value { + self.manifest.clone() + } +} + +impl TryFrom<serde_yaml::Value> for Manifest { + type Error = AnkaiosError; + + fn try_from(value: serde_yaml::Value) -> Result<Self, Self::Error> { + Self::from_dict(value) + } +} + +impl TryFrom<String> for Manifest { + type Error = AnkaiosError; + + fn try_from(value: String) -> Result<Self, Self::Error> { + Self::from_string(value) + } +} + +impl TryFrom<&Path> for Manifest { + type Error = AnkaiosError; + + fn try_from(value: &Path) -> Result<Self, Self::Error> { + Self::from_file(value) + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "test_utils", test))] +pub fn read_to_string_mock(_path: &Path) -> Result<String, std::io::Error> { + Ok(_path.to_str().unwrap().to_string()) +} + +#[cfg(any(feature = "test_utils", test))] +static MANIFEST_CONTENT: &str = r#"apiVersion: v0.1 +workloads: + nginx_test: + runtime: podman + restartPolicy:
NEVER + agent: agent_A + configs: + c: config1 + runtimeConfig: | + image: image/test +configs: + config1: \"value1\" + config2: + - \"value2\" + - \"value3\" + config3: + field1: \"value4\" + field2: \"value5\""#; + +#[cfg(test)] +pub fn generate_test_manifest() -> Manifest { + Manifest::from_string(MANIFEST_CONTENT).unwrap() +} + +#[cfg(test)] +mod tests { + use std::path::Path; + use serde_yaml; + use super::{Manifest, MANIFEST_CONTENT}; + + #[test] + fn test_creation() { + let manifest = Manifest::from_file(Path::new(MANIFEST_CONTENT)).unwrap(); + assert_eq!(manifest.manifest["apiVersion"], "v0.1"); + assert_eq!(manifest.calculate_masks(), vec!["desiredState.workloads.nginx_test", "desiredState.configs.config1", "desiredState.configs.config2", "desiredState.configs.config3"]); + + let _ = Manifest::try_from(Path::new("path")); + let _ = Manifest::try_from(MANIFEST_CONTENT.to_string()); + let _ = Manifest::try_from(serde_yaml::Value::default()); + } + + #[test] + fn test_no_workloads() { + let manifest_result = Manifest::from_string("apiVersion: v0.1"); + assert!(manifest_result.is_ok()); + let manifest: Manifest = manifest_result.unwrap(); + assert_eq!(manifest.calculate_masks().len(), 0); + } +} diff --git a/src/components/mod.rs b/src/components/mod.rs new file mode 100644 index 0000000..2b88c72 --- /dev/null +++ b/src/components/mod.rs @@ -0,0 +1,21 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +pub mod workload_mod; +pub mod workload_state_mod; +pub mod manifest; +pub mod complete_state; +pub mod request; +pub mod response; +pub mod control_interface; \ No newline at end of file diff --git a/src/components/request.rs b/src/components/request.rs new file mode 100644 index 0000000..7838694 --- /dev/null +++ b/src/components/request.rs @@ -0,0 +1,71 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +use crate::AnkaiosError; + + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum RequestType { + UpdateState = 0, + GetState = 1, +} + +pub struct Request{ + // TODO +} + +impl std::fmt::Display for RequestType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let req_str = match self { + RequestType::UpdateState => "UpdateState", + RequestType::GetState => "GetState", + }; + write!(f, "{}", req_str) + } +} + +impl Request { + pub fn new() -> Self { + Self{} + } + + pub fn print(&self) { + println!("I need to be implemented!!"); + } +} + +impl Default for Request { + fn default() -> Self { + Self::new() + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use super::Request; + + #[test] + fn test_request() { + let _ = Request::new(); + } +} \ No newline at end of file diff --git a/src/components/response.rs b/src/components/response.rs new file mode 100644 index 0000000..6978624 --- /dev/null +++ b/src/components/response.rs @@ -0,0 +1,100 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +use crate::AnkaiosError; + + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ResponseType { + Error = 0, + CompleteState = 1, + UpdateStateSuccess = 2, +} + +pub struct Response{ + // TODO +} + +// ResponseEvent? 
+ +pub struct UpdateStateSuccess{ + // TODO +} + +impl std::fmt::Display for ResponseType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let res_str = match self { + ResponseType::Error => "Error", + ResponseType::CompleteState => "CompleteState", + ResponseType::UpdateStateSuccess => "UpdateStateSuccess", + }; + write!(f, "{}", res_str) + } +} + +impl Response { + pub fn new() -> Self { + Self{} + } + + pub fn print(&self) { + println!("I need to be implemented!!"); + } +} + +impl Default for Response { + fn default() -> Self { + Self::new() + } +} + +impl UpdateStateSuccess { + pub fn new() -> Self { + Self{} + } + + pub fn print(&self) { + println!("I need to be implemented!!"); + } +} + +impl Default for UpdateStateSuccess { + fn default() -> Self { + Self::new() + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use super::{Response, UpdateStateSuccess}; + + #[test] + fn test_response() { + let _ = Response::new(); + } + + #[test] + fn test_update_state_success() { + let _ = UpdateStateSuccess::new(); + } +} \ No newline at end of file diff --git a/src/components/workload_mod/mod.rs b/src/components/workload_mod/mod.rs new file mode 100644 index 0000000..8280723 --- /dev/null +++ b/src/components/workload_mod/mod.rs @@ -0,0 +1,22 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +mod workload; +mod workload_builder; + +pub use workload::Workload; +pub use workload_builder::WorkloadBuilder; + +#[cfg(test)] +pub mod test_helpers; \ No newline at end of file diff --git a/src/components/workload_mod/test_helpers.rs b/src/components/workload_mod/test_helpers.rs new file mode 100644 index 0000000..8c8701c --- /dev/null +++ b/src/components/workload_mod/test_helpers.rs @@ -0,0 +1,105 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
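+// Editorial note: `read_to_string_mock` below returns the given path rendered as a
+// string instead of reading a file. Tests therefore pass the expected file content
+// (for example the runtime config) where a real path would normally go, as in
+// `runtime_config_from_file(Path::new(generate_test_runtime_config().as_str()))`.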
+// +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashMap; +use std::path::Path; +use api::ank_base; +use crate::Workload; + +#[cfg(any(feature = "test_utils", test))] +pub fn read_to_string_mock(_path: &Path) -> Result { + Ok(_path.to_str().unwrap().to_string()) +} + +#[cfg(any(feature = "test_utils", test))] +pub fn generate_test_dependencies() -> HashMap { + + HashMap::from([ + (String::from("workload_C"), ank_base::AddCondition::AddCondRunning as i32), + (String::from("workload_A"), ank_base::AddCondition::AddCondSucceeded as i32), + ]) +} + +pub fn generate_test_runtime_config() -> String { + String::from( + r#"generalOptions: ["--version"] +commandOptions: ["--network=host"] +image: alpine:latest +commandArgs: ["bash"] +"# + ) +} + +#[cfg(any(feature = "test_utils", test))] +pub fn generate_test_workload_proto>( + agent_name: T, + runtime_name: T, +) -> ank_base::Workload { + let runtime_config = generate_test_runtime_config(); + let deps = generate_test_dependencies(); + + ank_base::Workload { + agent: Some(agent_name.into()), + runtime: Some(runtime_name.into()), + runtime_config: Some(runtime_config), + restart_policy: Some(ank_base::RestartPolicy::Always as i32), + dependencies: Some(ank_base::Dependencies { + dependencies: deps + }), + tags: Some(ank_base::Tags { tags: vec![ank_base::Tag { + key: String::from("key_test"), + value: String::from("val_test"), + }] }), + control_interface_access: Some(ank_base::ControlInterfaceAccess { + allow_rules: vec![ank_base::AccessRightsRule { + access_rights_rule_enum: Some(ank_base::access_rights_rule::AccessRightsRuleEnum::StateRule( + ank_base::StateRule { + operation: ank_base::ReadWriteEnum::RwRead as i32, + filter_masks: vec![String::from("desiredState.workloads.workload_A")], + } + )), + }], + deny_rules: vec![ank_base::AccessRightsRule { + access_rights_rule_enum: Some(ank_base::access_rights_rule::AccessRightsRuleEnum::StateRule( + ank_base::StateRule { + operation: ank_base::ReadWriteEnum::RwWrite as i32, + filter_masks: vec![String::from("desiredState.workloads.workload_B")], + } + )), + }], + }), + configs: Some(ank_base::ConfigMappings { + configs: [ + (String::from("alias_test"), String::from("config_1")), + ].iter().cloned().collect(), + }), + } +} + +#[cfg(any(feature = "test_utils", test))] +pub fn generate_test_workload>( + agent_name: T, + workload_name: T, + runtime_name: T, +) -> Workload { + + let name = workload_name.into(); + + Workload { + workload: generate_test_workload_proto(agent_name, runtime_name), + main_mask: format!("desiredState.workloads.{}", name.clone()), + masks: vec![format!("desiredState.workloads.{}", name.clone())], + name, + } +} diff --git a/src/components/workload_mod/workload.rs b/src/components/workload_mod/workload.rs new file mode 100644 index 0000000..984eb17 --- /dev/null +++ b/src/components/workload_mod/workload.rs @@ -0,0 +1,772 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
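+// Editorial usage sketch (illustrative only, mirroring the mask-generation tests at
+// the end of this file; `proto` stands for any `ank_base::Workload`): every
+// `update_*` call records a filter mask under `desiredState.workloads.<name>`, e.g.
+//
+//     let mut wl = Workload::new_from_proto("nginx", proto);
+//     wl.update_agent_name("agent_A");
+//     // wl.masks == vec!["desiredState.workloads.nginx.agent"]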
+// +// SPDX-License-Identifier: Apache-2.0 + +use std::fmt; +use std::{collections::HashMap, path::Path, vec}; +use serde_yaml; +pub use api::ank_base; +use crate::AnkaiosError; +use crate::WorkloadBuilder; + +// Disable this from coverage +// https://github.com/rust-lang/rust/issues/84605 +#[cfg(not(test))] +fn read_file_to_string(path: &Path) -> Result { + std::fs::read_to_string(path) +} + +#[cfg(test)] +use crate::components::workload_mod::test_helpers::read_to_string_mock as read_file_to_string; + +#[derive(Debug, Clone)] +pub struct Workload{ + pub(crate) workload: ank_base::Workload, + pub(crate) main_mask: String, + pub masks: Vec, + pub name: String, +} + +impl Workload { + pub fn new_from_builder>(name: T) -> Self { + let name_str = name.into(); + Self{ + workload: ank_base::Workload::default(), + main_mask: format!("desiredState.workloads.{}", name_str), + masks: vec![format!("desiredState.workloads.{}", name_str)], + name: name_str, + } + } + + pub fn new_from_proto>(name: T, proto: ank_base::Workload) -> Self { + let name_str = name.into(); + Self{ + workload: proto, + main_mask: format!("desiredState.workloads.{}", name_str), + masks: vec![], + name: name_str, + } + } + + pub fn new_from_dict>(name: T, dict_workload: serde_yaml::Mapping) -> Result { + let mut wl_builder = Self::builder(); + wl_builder = wl_builder.workload_name(name); + + if let Some(agent) = dict_workload.get("agent") { + wl_builder = wl_builder.agent_name(agent.as_str().unwrap()) + } + if let Some(runtime) = dict_workload.get("runtime") { + wl_builder = wl_builder.runtime(runtime.as_str().unwrap()) + } + if let Some(runtime_config) = dict_workload.get("runtimeConfig") { + wl_builder = wl_builder.runtime_config(runtime_config.as_str().unwrap()) + } + if let Some(restart_policy) = dict_workload.get("restartPolicy") { + wl_builder = wl_builder.restart_policy(restart_policy.as_str().unwrap()) + } + if let Some(dependencies) = dict_workload.get("dependencies") { + for (key, value) in dependencies.as_mapping().unwrap() { + wl_builder = wl_builder.add_dependency( + key.as_str().unwrap(), + value.as_str().unwrap()); + } + } + if let Some(tags) = dict_workload.get("tags") { + for tag in tags.as_sequence().unwrap() { + wl_builder = wl_builder.add_tag( + tag.get("key").unwrap().as_str().unwrap(), + tag.get("value").unwrap().as_str().unwrap()); + } + } + if let Some(control_interface_access) = dict_workload.get("controlInterfaceAccess") { + if let Some(allow_rules) = control_interface_access.get("allowRules") { + for rule in allow_rules.as_sequence().unwrap() { + let operation = rule.get("operation").unwrap().as_str().unwrap(); + let filter_masks = rule.get("filterMask") + .unwrap().as_sequence().unwrap().iter().map(|x| x.as_str().unwrap().to_string()).collect(); + wl_builder = wl_builder.add_allow_rule(operation, filter_masks); + } + } + if let Some(deny_rules) = control_interface_access.get("denyRules") { + for rule in deny_rules.as_sequence().unwrap() { + let operation = rule.get("operation").unwrap().as_str().unwrap(); + let filter_masks = rule.get("filterMask") + .unwrap().as_sequence().unwrap().iter().map(|x| x.as_str().unwrap().to_string()).collect(); + wl_builder = wl_builder.add_deny_rule(operation, filter_masks); + } + } + } + if let Some(configs) = dict_workload.get("configs") { + for (alias, name) in configs.as_mapping().unwrap() { + wl_builder = wl_builder.add_config( + alias.as_str().unwrap(), + name.as_str().unwrap()); + } + } + + wl_builder.build() + } + + pub fn to_proto(&self) -> ank_base::Workload { 
+ self.workload.clone() + } + + pub fn to_dict(&self) -> serde_yaml::Mapping { + let mut dict = serde_yaml::Mapping::new(); + if self.workload.agent.is_some() { + dict.insert( + serde_yaml::Value::String("agent".to_string()), + serde_yaml::Value::String(self.workload.agent.clone().unwrap())); + } + if self.workload.runtime.is_some() { + dict.insert( + serde_yaml::Value::String("runtime".to_string()), + serde_yaml::Value::String(self.workload.runtime.clone().unwrap())); + } + if self.workload.runtime_config.is_some() { + dict.insert( + serde_yaml::Value::String("runtimeConfig".to_string()), + serde_yaml::Value::String(self.workload.runtime_config.clone().unwrap())); + } + if self.workload.restart_policy.is_some() { + dict.insert( + serde_yaml::Value::String("restartPolicy".to_string()), + serde_yaml::Value::String(ank_base::RestartPolicy::from_i32(self.workload.restart_policy.unwrap()).unwrap().as_str_name().to_string())); + } + if self.workload.dependencies.is_some() { + let mut deps = serde_yaml::Mapping::new(); + dict.insert( + serde_yaml::Value::String("dependencies".to_string()), + serde_yaml::Value::Mapping(serde_yaml::Mapping::new())); + for (key, value) in &self.workload.dependencies.clone().unwrap().dependencies { + deps.insert( + serde_yaml::Value::String(key.clone()), + serde_yaml::Value::String(ank_base::AddCondition::from_i32(*value).unwrap().as_str_name().to_string())); + } + dict.insert( + serde_yaml::Value::String("dependencies".to_string()), + serde_yaml::Value::Mapping(deps)); + } + if self.workload.tags.is_some() { + let mut tags = serde_yaml::Sequence::new(); + for tag in &self.workload.tags.clone().unwrap().tags { + let mut tag_dict = serde_yaml::Mapping::new(); + tag_dict.insert( + serde_yaml::Value::String("key".to_string()), + serde_yaml::Value::String(tag.key.clone())); + tag_dict.insert( + serde_yaml::Value::String("value".to_string()), + serde_yaml::Value::String(tag.value.clone())); + tags.push(serde_yaml::Value::Mapping(tag_dict)); + } + dict.insert( + serde_yaml::Value::String("tags".to_string()), + serde_yaml::Value::Sequence(tags)); + } + if self.workload.control_interface_access.is_some() { + let mut control_interface_access = serde_yaml::Mapping::new(); + + let mut allow_rules = serde_yaml::Sequence::new(); + for rule in &self.workload.control_interface_access.clone().unwrap().allow_rules { + let mut rule_dict = serde_yaml::Mapping::new(); + rule_dict.insert( + serde_yaml::Value::String("type".to_string()), + serde_yaml::Value::String("StateRule".to_string())); + if let ank_base::AccessRightsRule { + access_rights_rule_enum: Some( + ank_base::access_rights_rule::AccessRightsRuleEnum::StateRule(rule) + ), + } = rule { + match self.access_right_rule_to_str(rule) { + Ok(rule) => { + rule_dict.insert( + serde_yaml::Value::String("operation".to_string()), + serde_yaml::Value::String(rule.0)); + rule_dict.insert( + serde_yaml::Value::String("filterMask".to_string()), + serde_yaml::Value::Sequence(rule.1.into_iter().map(serde_yaml::Value::String).collect())); + }, + Err(_) => continue, + }; + } + allow_rules.push(serde_yaml::Value::Mapping(rule_dict)); + } + if !allow_rules.is_empty() { + control_interface_access.insert( + serde_yaml::Value::String("allowRules".to_string()), + serde_yaml::Value::Sequence(allow_rules)); + } + + let mut deny_rules = serde_yaml::Sequence::new(); + for rule in &self.workload.control_interface_access.clone().unwrap().deny_rules { + let mut rule_dict = serde_yaml::Mapping::new(); + rule_dict.insert( + 
serde_yaml::Value::String("type".to_string()), + serde_yaml::Value::String("StateRule".to_string())); + if let ank_base::AccessRightsRule { + access_rights_rule_enum: Some( + ank_base::access_rights_rule::AccessRightsRuleEnum::StateRule(rule) + ), + } = rule { + match self.access_right_rule_to_str(rule) { + Ok(rule) => { + rule_dict.insert( + serde_yaml::Value::String("operation".to_string()), + serde_yaml::Value::String(rule.0)); + rule_dict.insert( + serde_yaml::Value::String("filterMask".to_string()), + serde_yaml::Value::Sequence(rule.1.into_iter().map(serde_yaml::Value::String).collect())); + }, + Err(_) => continue, + }; + } + deny_rules.push(serde_yaml::Value::Mapping(rule_dict)); + } + if !deny_rules.is_empty() { + control_interface_access.insert( + serde_yaml::Value::String("denyRules".to_string()), + serde_yaml::Value::Sequence(deny_rules)); + } + + dict.insert( + serde_yaml::Value::String("controlInterfaceAccess".to_string()), + serde_yaml::Value::Mapping(control_interface_access)); + } + if self.workload.configs.is_some() { + let mut configs = serde_yaml::Mapping::new(); + for (alias, name) in &self.workload.configs.clone().unwrap().configs { + configs.insert( + serde_yaml::Value::String(alias.clone()), + serde_yaml::Value::String(name.clone())); + } + dict.insert( + serde_yaml::Value::String("configs".to_string()), + serde_yaml::Value::Mapping(configs)); + } + dict + } + + pub fn builder() -> WorkloadBuilder { + WorkloadBuilder::new() + } + + pub fn update_workload_name>(&mut self, new_name: T) { + self.name = new_name.into(); + self.main_mask = format!("desiredState.workloads.{}", self.name); + self.masks = vec![format!("desiredState.workloads.{}", self.name)]; + } + + pub fn update_agent_name>(&mut self, agent_name: T) { + self.workload.agent = Some(agent_name.into()); + self.add_mask(format!("{}.agent", self.main_mask)); + } + + pub fn update_runtime>(&mut self, runtime: T) { + self.workload.runtime = Some(runtime.into()); + self.add_mask(format!("{}.runtime", self.main_mask)); + } + + pub fn update_runtime_config>(&mut self, runtime_config: T) { + self.workload.runtime_config = Some(runtime_config.into()); + self.add_mask(format!("{}.runtimeConfig", self.main_mask)); + } + + pub fn update_runtime_config_from_file(&mut self, file_path: &Path) -> Result<(), AnkaiosError> { + let runtime_config = match read_file_to_string(file_path) { + Ok(config) => config, + Err(err) => return Err(AnkaiosError::IoError(err)) + }; + self.update_runtime_config(runtime_config); + Ok(()) + } + + pub fn update_restart_policy>(&mut self, restart_policy: T) -> Result<(), AnkaiosError> { + let restart_policy = restart_policy.into(); + self.workload.restart_policy = match ank_base::RestartPolicy::from_str_name(&restart_policy.clone()) { + Some(policy) => Some(policy as i32), + _ => return Err(AnkaiosError::WorkloadFieldError( + "restartPolicy".to_string(), + restart_policy + )) + }; + self.add_mask(format!("{}.restartPolicy", self.main_mask)); + Ok(()) + } + + pub fn get_dependencies(&self) -> HashMap { + let mut dependencies = HashMap::new(); + if let Some(deps) = &self.workload.dependencies { + for (key, value) in &deps.dependencies { + dependencies.insert(key.clone(), ank_base::AddCondition::from_i32(*value).unwrap().as_str_name().to_string()); + } + } + dependencies + } + + pub fn update_dependencies>(&mut self, dependencies: HashMap) -> Result<(), AnkaiosError> { + self.workload.dependencies = Some(ank_base::Dependencies::default()); + for (workload_name, condition) in dependencies { + let cond 
= condition.into(); + let add_condition = match ank_base::AddCondition::from_str_name(&cond.clone()) { + Some(cond) => cond as i32, + _ => return Err(AnkaiosError::WorkloadFieldError( + "dependency condition".to_string(), + cond + )), + }; + if self.workload.dependencies.is_none() { + self.workload.dependencies = Some(ank_base::Dependencies::default()); + } + self.workload.dependencies.as_mut().unwrap().dependencies.insert(workload_name.into(), add_condition); + } + self.add_mask(format!("{}.dependencies", self.main_mask)); + Ok(()) + } + + pub fn add_tag>(&mut self, key: T, value: T) { + if self.workload.tags.is_none() { + self.workload.tags = Some(ank_base::Tags::default()); + } + let key = key.into(); + self.workload.tags.as_mut().unwrap().tags.push(ank_base::Tag{key: key.clone(), value: value.into()}); + if !self.masks.contains(&format!("{}.tags", self.main_mask)) { + self.add_mask(format!("{}.tags.{}", self.main_mask, key)); + } + } + + pub fn get_tags(&self) -> Vec> { + let mut tags = vec![]; + if let Some(tags_list) = &self.workload.tags { + for tag in &tags_list.tags { + tags.push(vec![tag.key.clone(), tag.value.clone()]); + } + } + tags + } + + pub fn update_tags(&mut self, tags: &Vec>) { + self.workload.tags = Some(ank_base::Tags::default()); + for tag in tags { + self.workload.tags.as_mut().unwrap().tags.push(ank_base::Tag{key: tag[0].clone(), value: tag[1].clone()}); + } + self.masks.retain(|mask| !mask.starts_with(&format!("{}.tags", self.main_mask))); + self.add_mask(format!("{}.tags", self.main_mask)); + } + + fn generate_access_right_rule(&self, operation: &str, filter_masks: Vec) -> Result { + Ok(ank_base::AccessRightsRule { + access_rights_rule_enum: Some(ank_base::access_rights_rule::AccessRightsRuleEnum::StateRule( + ank_base::StateRule { + operation: match operation { + "Nothing" => ank_base::ReadWriteEnum::RwNothing as i32, + "Write" => ank_base::ReadWriteEnum::RwWrite as i32, + "Read" => ank_base::ReadWriteEnum::RwRead as i32, + "ReadWrite" => ank_base::ReadWriteEnum::RwReadWrite as i32, + _ => return Err(AnkaiosError::WorkloadFieldError( + "operation".to_string(), + operation.to_string(), + )), + }, + filter_masks, + } + )), + }) + } + + fn access_right_rule_to_str(&self, rule: &ank_base::StateRule) -> Result<(String, Vec), AnkaiosError> { + Ok((match ank_base::ReadWriteEnum::from_i32(rule.operation) { + Some(op) => match op.as_str_name() { + "RW_NOTHING" => "Nothing".to_string(), + "RW_WRITE" => "Write".to_string(), + "RW_READ" => "Read".to_string(), + "RW_READ_WRITE" => "ReadWrite".to_string(), + _ => return Err(AnkaiosError::WorkloadFieldError( + "operation".to_string(), + rule.operation.to_string(), + )) + }, + _ => return Err(AnkaiosError::WorkloadFieldError( + "operation".to_string(), + rule.operation.to_string(), + )), + }, rule.filter_masks.clone())) + } + + pub fn get_allow_rules(&self) -> Result)>, AnkaiosError> { + let mut rules = vec![]; + if let Some(access) = &self.workload.control_interface_access { + for rule in &access.allow_rules { + if let ank_base::AccessRightsRule { + access_rights_rule_enum: Some( + ank_base::access_rights_rule::AccessRightsRuleEnum::StateRule(rule) + ), + } = rule { + rules.push(match self.access_right_rule_to_str(rule) { + Ok(rule) => rule, + Err(err) => return Err(err), + }); + } + } + } + Ok(rules) + } + + pub fn update_allow_rules>(&mut self, rules: Vec<(T, Vec)>) -> Result<(), AnkaiosError> { + if self.workload.control_interface_access.is_none() { + self.workload.control_interface_access = 
Some(ank_base::ControlInterfaceAccess::default()); + } + self.workload.control_interface_access.as_mut().unwrap().allow_rules = vec![]; + for rule in rules { + let rule = match self.generate_access_right_rule( + rule.0.into().as_str(), + rule.1.into_iter().map(|x| x.into()).collect() + ) { + Ok(rule) => rule, + Err(err) => return Err(err), + }; + self.workload.control_interface_access.as_mut().unwrap().allow_rules.push(rule); + } + self.add_mask(format!("{}.controlInterfaceAccess.allowRules", self.main_mask)); + Ok(()) + } + + pub fn get_deny_rules(&self) -> Result)>, AnkaiosError> { + let mut rules = vec![]; + if let Some(access) = &self.workload.control_interface_access { + for rule in &access.deny_rules { + if let ank_base::AccessRightsRule { + access_rights_rule_enum: Some(ank_base::access_rights_rule::AccessRightsRuleEnum::StateRule(rule)), + } = rule { + rules.push(match self.access_right_rule_to_str(rule) { + Ok(rule) => rule, + Err(err) => return Err(err), + }); + } + } + } + Ok(rules) + } + + pub fn update_deny_rules>(&mut self, rules: Vec<(T, Vec)>) -> Result<(), AnkaiosError> { + if self.workload.control_interface_access.is_none() { + self.workload.control_interface_access = Some(ank_base::ControlInterfaceAccess::default()); + } + self.workload.control_interface_access.as_mut().unwrap().deny_rules = vec![]; + for rule in rules { + let rule = match self.generate_access_right_rule( + rule.0.into().as_str(), + rule.1.into_iter().map(|x| x.into()).collect() + ){ + Ok(rule) => rule, + Err(err) => return Err(err), + }; + self.workload.control_interface_access.as_mut().unwrap().deny_rules.push(rule); + } + self.add_mask(format!("{}.controlInterfaceAccess.denyRules", self.main_mask)); + Ok(()) + } + + pub fn add_config>(&mut self, alias: T, name: T) { + if self.workload.configs.is_none() { + self.workload.configs = Some(ank_base::ConfigMappings{ + configs: [ + (alias.into(), name.into()), + ].into(), + }); + } + else { + self.workload.configs.as_mut().unwrap().configs.insert(alias.into(), name.into()); + } + self.add_mask(format!("{}.configs", self.main_mask)); + } + + pub fn get_configs(&self) -> HashMap { + let mut configs = HashMap::new(); + if let Some(configs_map) = &self.workload.configs { + for (alias, name) in &configs_map.configs { + configs.insert(alias.clone(), name.clone()); + } + } + configs + } + + pub fn update_configs(&mut self, configs: HashMap) { + self.workload.configs = Some(ank_base::ConfigMappings{ + configs: configs.into_iter().collect(), + }); + } + + fn add_mask(&mut self, mask: String) { + if !self.masks.contains(&mask) && !self.masks.contains(&self.main_mask) { + self.masks.push(mask); + } + } +} + +impl fmt::Display for Workload { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Workload {}: {:?}", self.name, self.to_proto()) + } +} + + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + use std::path::Path; + use super::Workload; + use crate::components::workload_mod::test_helpers::{ + generate_test_workload, generate_test_workload_proto, generate_test_runtime_config + }; + + #[test] + fn utest_workload() { + let wl_test = generate_test_workload("agent_A".to_string(), "Test".to_string(), "podman".to_string()); + let wl_proto = 
generate_test_workload_proto("agent_A".to_string(), "podman".to_string()); + assert_eq!(wl_test.name, "Test"); + assert_eq!(wl_test.main_mask, "desiredState.workloads.Test"); + assert_eq!(wl_test.masks, vec!["desiredState.workloads.Test".to_string()]); + assert_eq!(wl_test.workload, wl_proto); + } + + #[test] + fn utest_workload_proto() { + let workload_proto = generate_test_workload_proto("agent_A".to_string(), "podman".to_string()); + let wl = Workload::new_from_proto("Test", workload_proto.clone()); + let new_proto = wl.to_proto(); + assert_eq!(workload_proto, new_proto); + } + + #[test] + fn utest_workload_dict(){ + let workload = generate_test_workload("agent_A", "nginx", "podman"); + let workload_dict = workload.to_dict(); + let workload_new = Workload::new_from_dict("nginx", workload_dict); + assert!(workload_new.is_ok()); + assert_eq!(workload.to_proto(), workload_new.unwrap().to_proto()); + } + + #[test] + fn utest_update_fields() { + let mut wl = generate_test_workload("Agent_A", "Test", "podman"); + assert_eq!(wl.masks, vec!["desiredState.workloads.Test".to_string()]); + + wl.update_workload_name("TestNew"); + assert_eq!(wl.name, "TestNew"); + + wl.update_agent_name("agent_B"); + assert_eq!(wl.workload.agent, Some("agent_B".to_string())); + + wl.update_runtime("podman-kube"); + assert_eq!(wl.workload.runtime, Some("podman-kube".to_string())); + + wl.update_runtime_config("config_test"); + assert_eq!(wl.workload.runtime_config, Some("config_test".to_string())); + + assert!(wl.update_restart_policy("NEVER").is_ok()); + assert_eq!(wl.workload.restart_policy, Some(0)); + + assert!(wl.update_restart_policy("Dance").is_err()); + + let tags = vec![vec!["key_test".to_string(), "val_test".to_string()]]; + wl.update_tags(&tags); + assert_eq!(wl.get_tags(), tags); + + let allow_rules = vec![("Read".to_string(), vec!["desiredState.workloads.workload_A".to_string()])]; + assert!(wl.update_allow_rules(allow_rules.clone()).is_ok()); + assert_eq!(wl.get_allow_rules().unwrap(), allow_rules); + + let deny_rules = vec![("Write".to_string(), vec!["desiredState.workloads.workload_B".to_string()])]; + assert!(wl.update_deny_rules(deny_rules.clone()).is_ok()); + assert_eq!(wl.get_deny_rules().unwrap(), deny_rules); + } + + #[test] + fn utest_dependencies() { + let mut wl = generate_test_workload("Agent_A", "Test", "podman"); + let mut deps = wl.get_dependencies(); + assert_eq!(deps.len(), 2); + + deps.remove("workload_A"); + assert!(wl.update_dependencies(deps).is_ok()); + assert_eq!(wl.get_dependencies().len(), 1); + + assert!(wl.update_dependencies(HashMap::from([("workload_A", "Dance")])).is_err()); + } + + #[test] + fn utest_tags() { + let mut wl = generate_test_workload("Agent_A", "Test", "podman"); + let mut tags = wl.get_tags(); + assert_eq!(tags.len(), 1); + + wl.add_tag("key_test_2", "val_test_2"); + tags.push(vec!["key_test_2".to_string(), "val_test_2".to_string()]); + assert_eq!(wl.get_tags().len(), 2); + assert_eq!(wl.get_tags(), tags); + + tags.remove(0); + wl.update_tags(&tags); + assert_eq!(wl.get_tags().len(), 1); + } + + #[test] + fn utest_rules() { + let mut wl = generate_test_workload("Agent_A", "Test", "podman"); + let mut allow_rules = wl.get_allow_rules().unwrap(); + assert_eq!(allow_rules.len(), 1); + + allow_rules.push(("Write".to_string(), vec!["desiredState.workloads.workload_B".to_string()])); + assert!(wl.update_allow_rules(allow_rules).is_ok()); + assert_eq!(wl.get_allow_rules().unwrap().len(), 2); + + assert!(wl.update_allow_rules(vec![("Dance".to_string(), 
vec!["desiredState.workloads.workload_A".to_string()])]).is_err()); + + let mut deny_rules = wl.get_deny_rules().unwrap(); + assert_eq!(deny_rules.len(), 1); + + deny_rules.push(("Read".to_string(), vec!["desiredState.workloads.workload_A".to_string()])); + assert!(wl.update_deny_rules(deny_rules).is_ok()); + assert_eq!(wl.get_deny_rules().unwrap().len(), 2); + + assert!(wl.update_deny_rules(vec![("Dance".to_string(), vec!["desiredState.workloads.workload_A".to_string()])]).is_err()); + } + + #[test] + fn utest_configs() { + let mut wl = generate_test_workload("Agent_A", "Test", "podman"); + let mut configs = wl.get_configs(); + assert_eq!(configs.len(), 1); + + wl.add_config("alias_test_2", "config_test_2"); + configs = wl.get_configs(); + assert_eq!(configs.len(), 2); + + configs.insert("alias_test_3".to_string(), "config_test_3".to_string()); + wl.update_configs(configs.clone()); + assert_eq!(wl.get_configs().len(), 3); + } + + macro_rules! generate_test_for_mask_generation { + ($test_name:ident, $method_name:ident, $expected_value:expr, $($args:expr),*) => { + #[test] + fn $test_name() { + let mut obj = Workload { + workload: generate_test_workload_proto("Agent_A".to_string(), "podman".to_string()), + main_mask: format!("desiredState.workloads.Test"), + masks: vec![], + name: "Test".to_string(), + }; + // Call function and assert the mask has been added + let _ = obj.$method_name($($args),*); + assert_eq!(obj.masks.len(), 1); + assert_eq!(obj.masks, $expected_value); + + // Adding again should not add another identical mask + let _ = obj.$method_name($($args),*); + assert_eq!(obj.masks.len(), 1); + } + }; + } + + generate_test_for_mask_generation!(utest_update_workload_name, update_workload_name, + vec![String::from("desiredState.workloads.TestNew")], "TestNew"); + generate_test_for_mask_generation!(utest_update_agent_name, update_agent_name, + vec![String::from("desiredState.workloads.Test.agent")], "agent_B"); + generate_test_for_mask_generation!(utest_update_runtime, update_runtime, + vec![String::from("desiredState.workloads.Test.runtime")], "podman"); + generate_test_for_mask_generation!(utest_update_restart_policy, update_restart_policy, + vec![String::from("desiredState.workloads.Test.restartPolicy")], "NEVER"); + generate_test_for_mask_generation!(utest_update_runtime_config, update_runtime_config, + vec![String::from("desiredState.workloads.Test.runtimeConfig")], "config"); + generate_test_for_mask_generation!(utest_update_runtime_config_from_file, update_runtime_config_from_file, + vec![String::from("desiredState.workloads.Test.runtimeConfig")], Path::new("")); + generate_test_for_mask_generation!(utest_update_dependencies, update_dependencies, + vec![String::from("desiredState.workloads.Test.dependencies")], HashMap::from([("workload_A", "ADD_COND_RUNNING")])); + generate_test_for_mask_generation!(utest_add_tag, add_tag, + vec![String::from("desiredState.workloads.Test.tags.key_test")], "key_test", "val_test"); + generate_test_for_mask_generation!(utest_update_tags, update_tags, + vec![String::from("desiredState.workloads.Test.tags")], &vec![vec!["key_test".to_string(), "val_test".to_string()]]); + generate_test_for_mask_generation!(utest_update_allow_rule, update_allow_rules, + vec![String::from("desiredState.workloads.Test.controlInterfaceAccess.allowRules")], vec![("Read".to_string(), vec!["desiredState.workloads.workload_A".to_string()])]); + generate_test_for_mask_generation!(utest_update_deny_rule, update_deny_rules, + 
vec![String::from("desiredState.workloads.Test.controlInterfaceAccess.denyRules")], vec![("Write".to_string(), vec!["desiredState.workloads.workload_B".to_string()])]); + generate_test_for_mask_generation!(utest_add_config, add_config, + vec![String::from("desiredState.workloads.Test.configs")], "alias_test", "config_test"); + + #[test] + fn utest_workload_builder() { + let wl = Workload::builder() + .workload_name("Test") + .agent_name("agent_A") + .runtime("podman") + .runtime_config_from_file(Path::new(generate_test_runtime_config().as_str())).unwrap() + .restart_policy("ALWAYS") + .add_dependency("workload_A", "ADD_COND_SUCCEEDED") + .add_dependency("workload_C", "ADD_COND_RUNNING") + .add_tag("key_test", "val_test") + .add_allow_rule("Read", vec!["desiredState.workloads.workload_A".to_string()]) + .add_deny_rule("Write", vec!["desiredState.workloads.workload_B".to_string()]) + .add_config("alias_test", "config_1") + .build(); + assert!(wl.is_ok()); + assert_eq!(wl.unwrap().to_proto(), generate_test_workload_proto("agent_A".to_string(), "podman".to_string())); + } + + #[test] + fn utest_build_return_err() { + // No workload name + assert!(Workload::builder() + .agent_name("agent_A") + .runtime("podman") + .runtime_config("config") + .build() + .is_err() + ); + + // No agent + assert!(Workload::builder() + .workload_name("Test") + .runtime("podman") + .runtime_config("config") + .build() + .is_err() + ); + + // No runtime + assert!(Workload::builder() + .workload_name("Test") + .agent_name("agent_A") + .runtime_config("config") + .build() + .is_err() + ); + + // No runtime config + assert!(Workload::builder() + .workload_name("Test") + .agent_name("agent_A") + .runtime("podman") + .build() + .is_err() + ); + } +} \ No newline at end of file diff --git a/src/components/workload_mod/workload_builder.rs b/src/components/workload_mod/workload_builder.rs new file mode 100644 index 0000000..cdcfc65 --- /dev/null +++ b/src/components/workload_mod/workload_builder.rs @@ -0,0 +1,232 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
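+// Editorial usage sketch (illustrative only, mirroring the unit tests below; the
+// runtime config string is an arbitrary example): the builder is consumed call by
+// call and `build()` fails unless name, agent, runtime and runtime config are set:
+//
+//     let wl = Workload::builder()
+//         .workload_name("nginx")
+//         .agent_name("agent_A")
+//         .runtime("podman")
+//         .runtime_config("image: nginx:latest")
+//         .build()?;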
+// +// SPDX-License-Identifier: Apache-2.0 + +use std::{collections::HashMap, path::Path, vec}; +use crate::AnkaiosError; +use crate::Workload; + +// Disable this from coverage +// https://github.com/rust-lang/rust/issues/84605 +#[cfg(not(test))] +fn read_file_to_string(path: &Path) -> Result { + std::fs::read_to_string(path) +} + +#[cfg(test)] +use crate::components::workload_mod::test_helpers::read_to_string_mock as read_file_to_string; + +#[derive(Debug, Default)] +pub struct WorkloadBuilder { + pub wl_name: String, + pub wl_agent_name: String, + pub wl_runtime: String, + pub wl_runtime_config: String, + pub wl_restart_policy: Option, + pub dependencies: HashMap, + pub tags: Vec>, + pub allow_rules: Vec<(String, Vec)>, + pub deny_rules: Vec<(String, Vec)>, + pub configs: HashMap, +} + +impl WorkloadBuilder{ + pub fn new() -> Self { + Self::default() + } + + pub fn workload_name>(mut self, name: T) -> Self { + self.wl_name = name.into(); + self + } + + pub fn agent_name>(mut self, name: T) -> Self { + self.wl_agent_name = name.into(); + self + } + + pub fn runtime>(mut self, runtime: T) -> Self { + self.wl_runtime = runtime.into(); + self + } + + pub fn runtime_config>(mut self, runtime_config: T) -> Self { + self.wl_runtime_config = runtime_config.into(); + self + } + + pub fn runtime_config_from_file(self, file_path: &Path) -> Result { + let runtime_config = match read_file_to_string(file_path) { + Ok(config) => config, + Err(err) => return Err(AnkaiosError::IoError(err)), + }; + Ok(self.runtime_config(runtime_config)) + } + + pub fn restart_policy>(mut self, restart_policy: T) -> Self { + self.wl_restart_policy = Some(restart_policy.into()); + self + } + + pub fn add_dependency>(mut self, workload_name: T, condition: T) -> Self { + self.dependencies.insert(workload_name.into(), condition.into()); + self + } + + pub fn add_tag>(mut self, key: T, value: T) -> Self { + self.tags.push(vec![key.into(), value.into()]); + self + } + + pub fn add_allow_rule>(mut self, operation: T, filter_masks: Vec) -> Self { + self.allow_rules.push((operation.into(), filter_masks)); + self + } + + pub fn add_deny_rule>(mut self, operation: T, filter_masks: Vec) -> Self { + self.deny_rules.push((operation.into(), filter_masks)); + self + } + + pub fn add_config>(mut self, alias: T, name: T) -> Self { + self.configs.insert(alias.into(), name.into()); + self + } + + pub fn build(self) -> Result { + if self.wl_name.is_empty() { + return Err(AnkaiosError::WorkloadBuilderError("Workload can not be built without a name.")); + } + let mut wl = Workload::new_from_builder(self.wl_name.clone()); + + if self.wl_agent_name.is_empty() { + return Err(AnkaiosError::WorkloadBuilderError("Workload can not be built without an agent name.")); + } + if self.wl_runtime.is_empty() { + return Err(AnkaiosError::WorkloadBuilderError("Workload can not be built without a runtime.")); + } + if self.wl_runtime_config.is_empty() { + return Err(AnkaiosError::WorkloadBuilderError("Workload can not be built without a runtime config.")); + } + + wl.update_agent_name(self.wl_agent_name.clone()); + wl.update_runtime(self.wl_runtime.clone()); + wl.update_runtime_config(self.wl_runtime_config.clone()); + + if let Some(restart_policy) = self.wl_restart_policy.clone() { + wl.update_restart_policy(restart_policy)?; + } + if !self.dependencies.is_empty() { + wl.update_dependencies(self.dependencies.clone())?; + } + if !self.tags.is_empty() { + wl.update_tags(&self.tags); + } + if !self.allow_rules.is_empty() { + 
wl.update_allow_rules(self.allow_rules.clone())?; + } + if !self.deny_rules.is_empty() { + wl.update_deny_rules(self.deny_rules.clone())?; + } + if !self.configs.is_empty() { + wl.update_configs(self.configs.clone()); + } + + Ok(wl) + } +} + + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use std::path::Path; + use crate::AnkaiosError; + use super::Workload; + use crate::components::workload_mod::test_helpers::{ + generate_test_workload_proto, generate_test_runtime_config + }; + + #[test] + fn utest_workload_builder() { + let wl = Workload::builder() + .workload_name("Test") + .agent_name("agent_A") + .runtime("podman") + .runtime_config_from_file(Path::new(generate_test_runtime_config().as_str())).unwrap() + .restart_policy("ALWAYS") + .add_dependency("workload_A", "ADD_COND_SUCCEEDED") + .add_dependency("workload_C", "ADD_COND_RUNNING") + .add_tag("key_test", "val_test") + .add_allow_rule("Read", vec!["desiredState.workloads.workload_A".to_string()]) + .add_deny_rule("Write", vec!["desiredState.workloads.workload_B".to_string()]) + .add_config("alias_test", "config_1") + .build(); + assert!(wl.is_ok()); + assert_eq!(wl.unwrap().to_proto(), generate_test_workload_proto("agent_A".to_string(), "podman".to_string())); + } + + #[test] + fn utest_build_return_err() { + // No workload name + assert!(matches!( + Workload::builder() + .agent_name("agent_A") + .runtime("podman") + .runtime_config("config") + .build() + .unwrap_err(), + AnkaiosError::WorkloadBuilderError(msg) if msg == "Workload can not be built without a name." + )); + + // No agent + assert!(matches!( + Workload::builder() + .workload_name("Test") + .runtime("podman") + .runtime_config("config") + .build() + .unwrap_err(), + AnkaiosError::WorkloadBuilderError(msg) if msg == "Workload can not be built without an agent name." + )); + + // No runtime + assert!(matches!( + Workload::builder() + .workload_name("Test") + .agent_name("agent_A") + .runtime_config("config") + .build() + .unwrap_err(), + AnkaiosError::WorkloadBuilderError(msg) if msg == "Workload can not be built without a runtime." + )); + + // No runtime config + assert!(matches!( + Workload::builder() + .workload_name("Test") + .agent_name("agent_A") + .runtime("podman") + .build() + .unwrap_err(), + AnkaiosError::WorkloadBuilderError(msg) if msg == "Workload can not be built without a runtime config." + )); + } +} \ No newline at end of file diff --git a/src/components/workload_state_mod/mod.rs b/src/components/workload_state_mod/mod.rs new file mode 100644 index 0000000..b766ede --- /dev/null +++ b/src/components/workload_state_mod/mod.rs @@ -0,0 +1,26 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
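+// Editorial note (illustrative only): this module converts a proto
+// `ank_base::ExecutionState` into a `(WorkloadStateEnum, WorkloadSubStateEnum)` pair,
+// e.g. `Running(Ok)` maps to (`Running`, `RunningOk`); see the tests in
+// workload_execution_state.rs.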
+// +// SPDX-License-Identifier: Apache-2.0 + +mod workload_state; +mod workload_state_enums; +mod workload_execution_state; +mod workload_instance_name; + +pub use workload_state::{WorkloadState, WorkloadStateCollection}; +pub use workload_state_enums::{WorkloadStateEnum, WorkloadSubStateEnum}; +pub use workload_execution_state::WorkloadExecutionState; +pub use workload_instance_name::WorkloadInstanceName; + +#[cfg(test)] +pub use workload_state::generate_test_workload_states_proto; \ No newline at end of file diff --git a/src/components/workload_state_mod/workload_execution_state.rs b/src/components/workload_state_mod/workload_execution_state.rs new file mode 100644 index 0000000..35cfed6 --- /dev/null +++ b/src/components/workload_state_mod/workload_execution_state.rs @@ -0,0 +1,159 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +use std::fmt; + +use api::ank_base; +use super::workload_state_enums::{WorkloadStateEnum, WorkloadSubStateEnum}; + + +#[derive(Debug, Default, Clone)] +pub struct WorkloadExecutionState{ + pub state: WorkloadStateEnum, + pub substate: WorkloadSubStateEnum, + pub additional_info: String, +} + +impl WorkloadExecutionState { + pub fn new(exec_state: ank_base::ExecutionState) -> WorkloadExecutionState { + match exec_state.execution_state_enum { + Some(execution_state_enum) => { + let (state, substate) = WorkloadExecutionState::parse_state(execution_state_enum); + WorkloadExecutionState { + state, + substate, + additional_info: exec_state.additional_info, + } + }, + None => WorkloadExecutionState { + state: WorkloadStateEnum::NotScheduled, + substate: WorkloadSubStateEnum::NotScheduled, + additional_info: exec_state.additional_info, + } + } + } + + pub fn to_dict(&self) -> serde_yaml::Mapping { + let mut map = serde_yaml::Mapping::new(); + map.insert(serde_yaml::Value::String("state".to_string()), serde_yaml::Value::String(self.state.to_string())); + map.insert(serde_yaml::Value::String("substate".to_string()), serde_yaml::Value::String(self.substate.to_string())); + map.insert(serde_yaml::Value::String("additional_info".to_string()), serde_yaml::Value::String(self.additional_info.clone())); + map + } + + pub fn parse_state(exec_state: ank_base::execution_state::ExecutionStateEnum) -> (WorkloadStateEnum, WorkloadSubStateEnum) { + let (state, value) = match exec_state { + ank_base::execution_state::ExecutionStateEnum::AgentDisconnected(value) => { + (WorkloadStateEnum::AgentDisconnected, value) + } + ank_base::execution_state::ExecutionStateEnum::Pending(value) => { + (WorkloadStateEnum::Pending, value) + } + ank_base::execution_state::ExecutionStateEnum::Running(value) => { + (WorkloadStateEnum::Running, value) + } + ank_base::execution_state::ExecutionStateEnum::Stopping(value) => { + (WorkloadStateEnum::Stopping, value) + } + ank_base::execution_state::ExecutionStateEnum::Succeeded(value) => { + (WorkloadStateEnum::Succeeded, value) + } + 
ank_base::execution_state::ExecutionStateEnum::Failed(value) => { + (WorkloadStateEnum::Failed, value) + } + ank_base::execution_state::ExecutionStateEnum::NotScheduled(value) => { + (WorkloadStateEnum::NotScheduled, value) + } + ank_base::execution_state::ExecutionStateEnum::Removed(value) => { + (WorkloadStateEnum::Removed, value) + } + }; + (state, WorkloadSubStateEnum::new(&state, value).unwrap()) + } +} + +impl fmt::Display for WorkloadExecutionState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} ({}): {}", self.state, self.substate, self.additional_info) + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use super::ank_base; + use super::{WorkloadExecutionState, WorkloadStateEnum, WorkloadSubStateEnum}; + + #[test] + fn test_default_functionality() { + let default_exec_state = WorkloadExecutionState::new( + ank_base::ExecutionState { + execution_state_enum: None, + additional_info: "No state present".to_string(), + } + ); + assert_eq!(default_exec_state.state, WorkloadStateEnum::NotScheduled); + assert_eq!(default_exec_state.substate, WorkloadSubStateEnum::NotScheduled); + assert_eq!(default_exec_state.additional_info, "No state present"); + assert_eq!(default_exec_state.to_string(), "NotScheduled (NotScheduled): No state present"); + + let mut expected_dict = serde_yaml::Mapping::new(); + expected_dict.insert(serde_yaml::Value::String("state".to_string()), serde_yaml::Value::String("NotScheduled".to_string())); + expected_dict.insert(serde_yaml::Value::String("substate".to_string()), serde_yaml::Value::String("NotScheduled".to_string())); + expected_dict.insert(serde_yaml::Value::String("additional_info".to_string()), serde_yaml::Value::String("No state present".to_string())); + + assert_eq!(default_exec_state.to_dict(), expected_dict); + } + + macro_rules! 
generate_test_for_workload_execution_state { + ($test_name:ident, $state:ident, $substate:ident, $ank_base_state:expr) => { + #[test] + fn $test_name() { + let exec_state = WorkloadExecutionState::new( + ank_base::ExecutionState { + execution_state_enum: Some($ank_base_state), + additional_info: "Additional info".to_string(), + } + ); + assert_eq!(exec_state.state, WorkloadStateEnum::$state); + assert_eq!(exec_state.substate, WorkloadSubStateEnum::$substate); + assert_eq!(exec_state.additional_info, "Additional info"); + } + }; + } + + generate_test_for_workload_execution_state!(test_agent_disconnected, AgentDisconnected, AgentDisconnected, + ank_base::execution_state::ExecutionStateEnum::AgentDisconnected(ank_base::AgentDisconnected::AgentDisconnected as i32)); + generate_test_for_workload_execution_state!(test_pending, Pending, PendingWaitingToStart, + ank_base::execution_state::ExecutionStateEnum::Pending(ank_base::Pending::WaitingToStart as i32)); + generate_test_for_workload_execution_state!(test_running, Running, RunningOk, + ank_base::execution_state::ExecutionStateEnum::Running(ank_base::Running::Ok as i32)); + generate_test_for_workload_execution_state!(test_stopping, Stopping, StoppingWaitingToStop, + ank_base::execution_state::ExecutionStateEnum::Stopping(ank_base::Stopping::WaitingToStop as i32)); + generate_test_for_workload_execution_state!(test_succeeded, Succeeded, SucceededOk, + ank_base::execution_state::ExecutionStateEnum::Succeeded(ank_base::Succeeded::Ok as i32)); + generate_test_for_workload_execution_state!(test_failed, Failed, FailedExecFailed, + ank_base::execution_state::ExecutionStateEnum::Failed(ank_base::Failed::ExecFailed as i32)); + generate_test_for_workload_execution_state!(test_not_scheduled, NotScheduled, NotScheduled, + ank_base::execution_state::ExecutionStateEnum::NotScheduled(ank_base::NotScheduled::NotScheduled as i32)); + generate_test_for_workload_execution_state!(test_removed, Removed, Removed, + ank_base::execution_state::ExecutionStateEnum::Removed(ank_base::Removed::Removed as i32)); +} \ No newline at end of file diff --git a/src/components/workload_state_mod/workload_instance_name.rs b/src/components/workload_state_mod/workload_instance_name.rs new file mode 100644 index 0000000..0e868be --- /dev/null +++ b/src/components/workload_state_mod/workload_instance_name.rs @@ -0,0 +1,89 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
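+// Editorial usage sketch (illustrative only, mirroring the test below): an instance
+// name renders as "<workload>.<id>.<agent>", while its filter mask is
+// "workloadStates.<agent>.<workload>.<id>", e.g.
+//
+//     let name = WorkloadInstanceName::new("agent_A".to_string(), "nginx".to_string(), "1234".to_string());
+//     assert_eq!(name.to_string(), "nginx.1234.agent_A");
+//     assert_eq!(name.get_filter_mask(), "workloadStates.agent_A.nginx.1234");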
+// +// SPDX-License-Identifier: Apache-2.0 + +use std::fmt; + + +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct WorkloadInstanceName{ + pub agent_name: String, + pub workload_name: String, + pub workload_id: String, +} + +impl WorkloadInstanceName { + pub fn new(agent_name: String, workload_name: String, workload_id: String) -> WorkloadInstanceName { + WorkloadInstanceName { + agent_name, + workload_name, + workload_id, + } + } + + pub fn to_dict(&self) -> serde_yaml::Mapping { + let mut map = serde_yaml::Mapping::new(); + map.insert(serde_yaml::Value::String("agent_name".to_string()), serde_yaml::Value::String(self.agent_name.clone())); + map.insert(serde_yaml::Value::String("workload_name".to_string()), serde_yaml::Value::String(self.workload_name.clone())); + map.insert(serde_yaml::Value::String("workload_id".to_string()), serde_yaml::Value::String(self.workload_id.clone())); + map + } + + pub fn get_filter_mask(&self) -> String { + format!("workloadStates.{}.{}.{}", self.agent_name, self.workload_name, self.workload_id) + } +} + +impl fmt::Display for WorkloadInstanceName { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}.{}.{}", self.workload_name, self.workload_id, self.agent_name) + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use super::WorkloadInstanceName; + + #[test] + fn test_instance_name() { + let instance_name = WorkloadInstanceName::new( + "agent_Test".to_string(), "workload_Test".to_string(), "1234".to_string() + ); + assert_eq!(instance_name.agent_name, "agent_Test"); + assert_eq!(instance_name.workload_name, "workload_Test"); + assert_eq!(instance_name.workload_id, "1234"); + + assert_eq!(instance_name.to_string(), "workload_Test.1234.agent_Test"); + assert_eq!(instance_name.get_filter_mask(), "workloadStates.agent_Test.workload_Test.1234"); + assert_eq!(instance_name.to_dict(), serde_yaml::Mapping::from_iter([ + (serde_yaml::Value::String("agent_name".to_string()), serde_yaml::Value::String("agent_Test".to_string())), + (serde_yaml::Value::String("workload_name".to_string()), serde_yaml::Value::String("workload_Test".to_string())), + (serde_yaml::Value::String("workload_id".to_string()), serde_yaml::Value::String("1234".to_string())), + ])); + + let mut another_instance_name = WorkloadInstanceName::new( + "agent_Test".to_string(), "workload_Test".to_string(), "1234".to_string() + ); + assert_eq!(instance_name, another_instance_name); + another_instance_name.agent_name = "agent_Test2".to_string(); + assert_ne!(instance_name, another_instance_name); + } +} \ No newline at end of file diff --git a/src/components/workload_state_mod/workload_state.rs b/src/components/workload_state_mod/workload_state.rs new file mode 100644 index 0000000..cd12884 --- /dev/null +++ b/src/components/workload_state_mod/workload_state.rs @@ -0,0 +1,248 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. 
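+// Editorial usage sketch (illustrative only, mirroring the tests below; `proto_map`
+// stands for any `ank_base::WorkloadStatesMap`): a collection built from the proto
+// map can be queried per instance name:
+//
+//     let states = WorkloadStateCollection::new_from_proto(&proto_map);
+//     let name = WorkloadInstanceName::new("agent_B".to_string(), "nginx".to_string(), "5678".to_string());
+//     let exec = states.get_for_instance_name(&name); // Option<&WorkloadExecutionState>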
+// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +use std::fmt; +use std::collections::HashMap; + +use api::ank_base; +use super::workload_execution_state::WorkloadExecutionState; +use super::workload_instance_name::WorkloadInstanceName; +use crate::AnkaiosError; + +type ExecutionsStatesForId = HashMap; +type ExecutionsStatesOfWorkload = HashMap; +type WorkloadStatesMap = HashMap; + +#[derive(Debug, Default)] +pub struct WorkloadState { + pub execution_state: WorkloadExecutionState, + pub workload_instance_name: WorkloadInstanceName, +} + +#[derive(Debug, Default)] +pub struct WorkloadStateCollection { + workload_states: WorkloadStatesMap, +} + +impl WorkloadState { + pub fn new_from_ank_base(agent_name: String, workload_name: String, workload_id: String, state: ank_base::ExecutionState) -> WorkloadState { + WorkloadState { + execution_state: WorkloadExecutionState::new(state), + workload_instance_name: WorkloadInstanceName::new(agent_name, workload_name, workload_id), + } + } + + pub fn new_from_exec_state(agent_name: String, workload_name: String, workload_id: String, exec_state: WorkloadExecutionState) -> WorkloadState { + WorkloadState { + execution_state: exec_state, + workload_instance_name: WorkloadInstanceName::new(agent_name, workload_name, workload_id), + } + } +} + +impl fmt::Display for WorkloadState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}: {}", self.workload_instance_name, self.execution_state) + } +} + +impl WorkloadStateCollection { + pub fn new() -> WorkloadStateCollection { + WorkloadStateCollection { + workload_states: HashMap::new(), + } + } + + pub fn new_from_proto(workload_states_map: &ank_base::WorkloadStatesMap) -> WorkloadStateCollection { + let mut workload_states = WorkloadStateCollection::new(); + for (agent_name, workloads) in workload_states_map.agent_state_map.iter() { + for (workload_name, workload_states_for_id) in workloads.wl_name_state_map.iter() { + for (workload_id, state) in workload_states_for_id.id_state_map.iter() { + let workload_state = WorkloadState::new_from_ank_base(agent_name.clone(), workload_name.clone(), workload_id.clone(), state.clone()); + workload_states.add_workload_state(workload_state); + } + } + } + workload_states + } + + pub fn add_workload_state(&mut self, workload_state: WorkloadState) { + let agent_name = workload_state.workload_instance_name.agent_name.clone(); + let workload_name = workload_state.workload_instance_name.workload_name.clone(); + let workload_id = workload_state.workload_instance_name.workload_id.clone(); + + if !self.workload_states.contains_key(&agent_name) { + self.workload_states.insert(agent_name.clone(), ExecutionsStatesOfWorkload::new()); + } + + if !self.workload_states.get(&agent_name).unwrap().contains_key(&workload_name) { + self.workload_states.get_mut(&agent_name).unwrap().insert(workload_name.clone(), ExecutionsStatesForId::new()); + } + + self.workload_states.get_mut(&agent_name).unwrap().get_mut(&workload_name).unwrap().insert(workload_id, workload_state.execution_state); + } + + pub fn get_as_dict(&self) -> serde_yaml::Mapping { + let mut map = serde_yaml::Mapping::new(); + for (agent_name, workload_states) in self.workload_states.iter() { 
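+                // Nesting mirrors the internal map: agent name -> workload name -> workload id -> execution state as dict.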
+ let mut agent_map = serde_yaml::Mapping::new(); + for (workload_name, workload_states_for_id) in workload_states.iter() { + let mut workload_map = serde_yaml::Mapping::new(); + for (workload_id, workload_state) in workload_states_for_id.iter() { + workload_map.insert(serde_yaml::Value::String(workload_id.clone()), serde_yaml::Value::Mapping(workload_state.to_dict())); + } + agent_map.insert(serde_yaml::Value::String(workload_name.clone()), serde_yaml::Value::Mapping(workload_map)); + } + map.insert(serde_yaml::Value::String(agent_name.clone()), serde_yaml::Value::Mapping(agent_map)); + } + map + } + + pub fn get_as_list(&self) -> Vec { + let mut list = Vec::new(); + for (agent_name, workload_states_for_agent) in self.workload_states.iter() { + for (workload_name, workload_states_for_id) in workload_states_for_agent.iter() { + for (workload_id, workload_state) in workload_states_for_id.iter() { + let workload_instance_name = WorkloadInstanceName::new( + agent_name.clone(), + workload_name.clone(), + workload_id.clone(), + ); + list.push(WorkloadState { + execution_state: workload_state.clone(), + workload_instance_name, + }); + } + } + } + list + } + + pub fn get_for_instance_name(&self, instance_name: &WorkloadInstanceName) -> Option<&WorkloadExecutionState> { + self.workload_states.get(&instance_name.agent_name) + .and_then(|workloads| workloads.get(&instance_name.workload_name)) + .and_then(|workload| workload.get(&instance_name.workload_id)) + } +} + +impl TryFrom for WorkloadStateCollection { + type Error = AnkaiosError; + + fn try_from(proto: ank_base::WorkloadStatesMap) -> Result { + Ok(Self::new_from_proto(&proto)) + } +} + +////////////////////////////////////////////////////////////////////////////// +// ######## ####### ######### ######### // +// ## ## ## ## // +// ## ##### ######### ## // +// ## ## ## ## // +// ## ####### ######### ## // +////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "test_utils", test))] +pub fn generate_test_workload_states_proto() -> ank_base::WorkloadStatesMap { + ank_base::WorkloadStatesMap { agent_state_map: HashMap::from([ + ("agent_A".to_string(), ank_base::ExecutionsStatesOfWorkload{ + wl_name_state_map: HashMap::from([ + ("nginx".to_string(), ank_base::ExecutionsStatesForId{ + id_state_map: HashMap::from([ + ("1234".to_string(), ank_base::ExecutionState{ + execution_state_enum: Some(ank_base::execution_state::ExecutionStateEnum::Succeeded(ank_base::Succeeded::Ok as i32)), + additional_info: "Random info".to_string(), + }), + ]) + }), + ]) + },), + ("agent_B".to_string(), ank_base::ExecutionsStatesOfWorkload{ + wl_name_state_map: HashMap::from([ + ("nginx".to_string(), ank_base::ExecutionsStatesForId{ + id_state_map: HashMap::from([ + ("5678".to_string(), ank_base::ExecutionState{ + execution_state_enum: Some(ank_base::execution_state::ExecutionStateEnum::Pending(ank_base::Pending::WaitingToStart as i32)), + additional_info: "Random info".to_string(), + }), + ]) + }), + ("dyn_nginx".to_string(), ank_base::ExecutionsStatesForId{ + id_state_map: HashMap::from([ + ("9012".to_string(), ank_base::ExecutionState{ + execution_state_enum: Some(ank_base::execution_state::ExecutionStateEnum::Stopping(ank_base::Stopping::WaitingToStop as i32)), + additional_info: "Random info".to_string(), + }), + ]) + }), + ]) + },), + ])} +} + +#[cfg(test)] +mod tests { + use crate::components::workload_state_mod::{WorkloadStateEnum, WorkloadSubStateEnum}; + + use super::{ank_base, WorkloadExecutionState, 
WorkloadInstanceName, WorkloadState, WorkloadStateCollection}; + use super::generate_test_workload_states_proto; + + #[test] + fn test_workload_state() { + let agent_name = "agent_name".to_string(); + let workload_name = "workload_name".to_string(); + let workload_id = "workload_id".to_string(); + let state = ank_base::ExecutionState { + execution_state_enum: Some(ank_base::execution_state::ExecutionStateEnum::Pending(ank_base::Pending::WaitingToStart as i32)), + additional_info: "additional_info".to_string(), + }; + let exec_state = WorkloadExecutionState::new(state.clone()); + + let workload_state_ank_base = WorkloadState::new_from_ank_base(agent_name.clone(), workload_name.clone(), workload_id.clone(), state.clone()); + let workload_state_exec_state = WorkloadState::new_from_exec_state(agent_name.clone(), workload_name.clone(), workload_id.clone(), exec_state.clone()); + + assert_eq!(workload_state_ank_base.to_string(), workload_state_exec_state.to_string()); + assert_eq!(workload_state_ank_base.execution_state.state, WorkloadStateEnum::Pending); + assert_eq!(workload_state_ank_base.execution_state.substate, WorkloadSubStateEnum::PendingWaitingToStart); + assert_eq!(workload_state_ank_base.execution_state.additional_info, "additional_info"); + assert_eq!(workload_state_ank_base.workload_instance_name.agent_name, agent_name); + assert_eq!(workload_state_ank_base.workload_instance_name.workload_name, workload_name); + assert_eq!(workload_state_ank_base.workload_instance_name.workload_id, workload_id); + } + + #[test] + fn test_workload_state_collection() { + let state_collection = WorkloadStateCollection::new_from_proto( + &generate_test_workload_states_proto()); + let mut state_list = state_collection.get_as_list(); + // The list comes unsorted, thus the test is not deterministic + state_list.sort_by(|a, b| a.workload_instance_name.agent_name.cmp(&b.workload_instance_name.agent_name)); + assert_eq!(state_list.len(), 3); + assert_eq!(state_list[0].workload_instance_name.agent_name, "agent_A"); + assert_eq!(state_list[0].workload_instance_name.workload_name, "nginx"); + assert_eq!(state_list[0].workload_instance_name.workload_id, "1234"); + assert_eq!(state_list[1].workload_instance_name.agent_name, "agent_B"); + assert_eq!(state_list[2].workload_instance_name.agent_name, "agent_B"); + + let state_dict = state_collection.get_as_dict(); + assert_eq!(state_dict.len(), 2); + assert_eq!(state_dict.get("agent_A".to_string()).unwrap().as_mapping().unwrap().len(), 1); + assert_eq!(state_dict.get("agent_B".to_string()).unwrap().as_mapping().unwrap().len(), 2); + + let workload_instance_name = WorkloadInstanceName::new("agent_B".to_string(), "nginx".to_string(), "5678".to_string()); + let workload_state = state_collection.get_for_instance_name(&workload_instance_name).unwrap(); + assert_eq!(workload_state.state, WorkloadStateEnum::Pending); + assert_eq!(workload_state.substate, WorkloadSubStateEnum::PendingWaitingToStart); + assert_eq!(workload_state.additional_info, "Random info"); + } +} \ No newline at end of file diff --git a/src/components/workload_state_mod/workload_state_enums.rs b/src/components/workload_state_mod/workload_state_enums.rs new file mode 100644 index 0000000..cb00537 --- /dev/null +++ b/src/components/workload_state_mod/workload_state_enums.rs @@ -0,0 +1,345 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// 
https://www.apache.org/licenses/LICENSE-2.0.
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+use std::fmt;
+
+use api::ank_base;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum WorkloadStateEnum {
+    AgentDisconnected = 0,
+    Pending = 1,
+    Running = 2,
+    Stopping = 3,
+    Succeeded = 4,
+    Failed = 5,
+    NotScheduled = 6,
+    Removed = 7,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum WorkloadSubStateEnum {
+    AgentDisconnected = 0,
+    PendingInitial = 1,
+    PendingWaitingToStart = 2,
+    PendingStarting = 3,
+    PendingStartingFailed = 4,
+    RunningOk = 5,
+    Stopping = 6,
+    StoppingWaitingToStop = 7,
+    StoppingRequestedAtRuntime = 8,
+    StoppingDeleteFailed = 9,
+    SucceededOk = 10,
+    FailedExecFailed = 11,
+    FailedUnknown = 12,
+    FailedLost = 13,
+    NotScheduled = 14,
+    Removed = 15,
+}
+
+impl WorkloadStateEnum {
+    pub fn new_from_str<T: Into<String>>(value: T) -> WorkloadStateEnum {
+        match value.into().as_str() {
+            "AgentDisconnected" => WorkloadStateEnum::AgentDisconnected,
+            "Pending" => WorkloadStateEnum::Pending,
+            "Running" => WorkloadStateEnum::Running,
+            "Stopping" => WorkloadStateEnum::Stopping,
+            "Succeeded" => WorkloadStateEnum::Succeeded,
+            "Failed" => WorkloadStateEnum::Failed,
+            "NotScheduled" => WorkloadStateEnum::NotScheduled,
+            "Removed" => WorkloadStateEnum::Removed,
+            _ => panic!("Invalid value for WorkloadStateEnum"),
+        }
+    }
+
+    pub fn as_i32(&self) -> i32 {
+        *self as i32
+    }
+}
+
+impl std::fmt::Display for WorkloadStateEnum {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let state_str = match self {
+            WorkloadStateEnum::AgentDisconnected => "AgentDisconnected",
+            WorkloadStateEnum::Pending => "Pending",
+            WorkloadStateEnum::Running => "Running",
+            WorkloadStateEnum::Stopping => "Stopping",
+            WorkloadStateEnum::Succeeded => "Succeeded",
+            WorkloadStateEnum::Failed => "Failed",
+            WorkloadStateEnum::NotScheduled => "NotScheduled",
+            WorkloadStateEnum::Removed => "Removed",
+        };
+        write!(f, "{}", state_str)
+    }
+}
+
+impl std::str::FromStr for WorkloadStateEnum {
+    type Err = ();
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "AgentDisconnected" => Ok(WorkloadStateEnum::AgentDisconnected),
+            "Pending" => Ok(WorkloadStateEnum::Pending),
+            "Running" => Ok(WorkloadStateEnum::Running),
+            "Stopping" => Ok(WorkloadStateEnum::Stopping),
+            "Succeeded" => Ok(WorkloadStateEnum::Succeeded),
+            "Failed" => Ok(WorkloadStateEnum::Failed),
+            "NotScheduled" => Ok(WorkloadStateEnum::NotScheduled),
+            "Removed" => Ok(WorkloadStateEnum::Removed),
+            _ => Err(()),
+        }
+    }
+}
+
+impl WorkloadSubStateEnum {
+    pub fn new(state: &WorkloadStateEnum, value: i32) -> Result<WorkloadSubStateEnum, String> {
+        match state {
+            WorkloadStateEnum::AgentDisconnected => match ank_base::AgentDisconnected::from_i32(value) {
+                Some(ank_base::AgentDisconnected::AgentDisconnected) => Ok(WorkloadSubStateEnum::AgentDisconnected),
+                None => Err("Invalid value for state AgentDisconnected".to_string()),
+            },
+            WorkloadStateEnum::Pending => match ank_base::Pending::from_i32(value) {
+                Some(ank_base::Pending::Initial) => Ok(WorkloadSubStateEnum::PendingInitial),
+
Some(ank_base::Pending::WaitingToStart) => Ok(WorkloadSubStateEnum::PendingWaitingToStart), + Some(ank_base::Pending::Starting) => Ok(WorkloadSubStateEnum::PendingStarting), + Some(ank_base::Pending::StartingFailed) => Ok(WorkloadSubStateEnum::PendingStartingFailed), + None => Err("Invalid value for state Pending".to_string()), + }, + WorkloadStateEnum::Running => match ank_base::Running::from_i32(value) { + Some(ank_base::Running::Ok) => Ok(WorkloadSubStateEnum::RunningOk), + None => Err("Invalid value for state Running".to_string()), + }, + WorkloadStateEnum::Stopping => match ank_base::Stopping::from_i32(value) { + Some(ank_base::Stopping::Stopping) => Ok(WorkloadSubStateEnum::Stopping), + Some(ank_base::Stopping::WaitingToStop) => Ok(WorkloadSubStateEnum::StoppingWaitingToStop), + Some(ank_base::Stopping::RequestedAtRuntime) => Ok(WorkloadSubStateEnum::StoppingRequestedAtRuntime), + Some(ank_base::Stopping::DeleteFailed) => Ok(WorkloadSubStateEnum::StoppingDeleteFailed), + None => Err("Invalid value for state Stopping".to_string()), + }, + WorkloadStateEnum::Succeeded => match ank_base::Succeeded::from_i32(value) { + Some(ank_base::Succeeded::Ok) => Ok(WorkloadSubStateEnum::SucceededOk), + None => Err("Invalid value for state Succeeded".to_string()), + }, + WorkloadStateEnum::Failed => match ank_base::Failed::from_i32(value) { + Some(ank_base::Failed::ExecFailed) => Ok(WorkloadSubStateEnum::FailedExecFailed), + Some(ank_base::Failed::Unknown) => Ok(WorkloadSubStateEnum::FailedUnknown), + Some(ank_base::Failed::Lost) => Ok(WorkloadSubStateEnum::FailedLost), + None => Err("Invalid value for state Failed".to_string()), + }, + WorkloadStateEnum::NotScheduled => match ank_base::NotScheduled::from_i32(value) { + Some(ank_base::NotScheduled::NotScheduled) => Ok(WorkloadSubStateEnum::NotScheduled), + None => Err("Invalid value for state NotScheduled".to_string()), + }, + WorkloadStateEnum::Removed => match ank_base::Removed::from_i32(value) { + Some(ank_base::Removed::Removed) => Ok(WorkloadSubStateEnum::Removed), + None => Err("Invalid value for state Removed".to_string()), + }, + } + } + + pub fn to_i32(self) -> i32 { + match self { + WorkloadSubStateEnum::AgentDisconnected => ank_base::AgentDisconnected::AgentDisconnected as i32, + WorkloadSubStateEnum::PendingInitial => ank_base::Pending::Initial as i32, + WorkloadSubStateEnum::PendingWaitingToStart => ank_base::Pending::WaitingToStart as i32, + WorkloadSubStateEnum::PendingStarting => ank_base::Pending::Starting as i32, + WorkloadSubStateEnum::PendingStartingFailed => ank_base::Pending::StartingFailed as i32, + WorkloadSubStateEnum::RunningOk => ank_base::Running::Ok as i32, + WorkloadSubStateEnum::Stopping => ank_base::Stopping::Stopping as i32, + WorkloadSubStateEnum::StoppingWaitingToStop => ank_base::Stopping::WaitingToStop as i32, + WorkloadSubStateEnum::StoppingRequestedAtRuntime => ank_base::Stopping::RequestedAtRuntime as i32, + WorkloadSubStateEnum::StoppingDeleteFailed => ank_base::Stopping::DeleteFailed as i32, + WorkloadSubStateEnum::SucceededOk => ank_base::Succeeded::Ok as i32, + WorkloadSubStateEnum::FailedExecFailed => ank_base::Failed::ExecFailed as i32, + WorkloadSubStateEnum::FailedUnknown => ank_base::Failed::Unknown as i32, + WorkloadSubStateEnum::FailedLost => ank_base::Failed::Lost as i32, + WorkloadSubStateEnum::NotScheduled => ank_base::NotScheduled::NotScheduled as i32, + WorkloadSubStateEnum::Removed => ank_base::Removed::Removed as i32, + } + } +} + +impl fmt::Display for WorkloadSubStateEnum { + fn fmt(&self, 
f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let substate_str = match self {
+            WorkloadSubStateEnum::AgentDisconnected => "AgentDisconnected",
+            WorkloadSubStateEnum::PendingInitial => "PendingInitial",
+            WorkloadSubStateEnum::PendingWaitingToStart => "PendingWaitingToStart",
+            WorkloadSubStateEnum::PendingStarting => "PendingStarting",
+            WorkloadSubStateEnum::PendingStartingFailed => "PendingStartingFailed",
+            WorkloadSubStateEnum::RunningOk => "RunningOk",
+            WorkloadSubStateEnum::Stopping => "Stopping",
+            WorkloadSubStateEnum::StoppingWaitingToStop => "StoppingWaitingToStop",
+            WorkloadSubStateEnum::StoppingRequestedAtRuntime => "StoppingRequestedAtRuntime",
+            WorkloadSubStateEnum::StoppingDeleteFailed => "StoppingDeleteFailed",
+            WorkloadSubStateEnum::SucceededOk => "SucceededOk",
+            WorkloadSubStateEnum::FailedExecFailed => "FailedExecFailed",
+            WorkloadSubStateEnum::FailedUnknown => "FailedUnknown",
+            WorkloadSubStateEnum::FailedLost => "FailedLost",
+            WorkloadSubStateEnum::NotScheduled => "NotScheduled",
+            WorkloadSubStateEnum::Removed => "Removed",
+        };
+        write!(f, "{}", substate_str)
+    }
+}
+
+impl std::str::FromStr for WorkloadSubStateEnum {
+    type Err = ();
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "AgentDisconnected" => Ok(WorkloadSubStateEnum::AgentDisconnected),
+            "PendingInitial" => Ok(WorkloadSubStateEnum::PendingInitial),
+            "PendingWaitingToStart" => Ok(WorkloadSubStateEnum::PendingWaitingToStart),
+            "PendingStarting" => Ok(WorkloadSubStateEnum::PendingStarting),
+            "PendingStartingFailed" => Ok(WorkloadSubStateEnum::PendingStartingFailed),
+            "RunningOk" => Ok(WorkloadSubStateEnum::RunningOk),
+            "Stopping" => Ok(WorkloadSubStateEnum::Stopping),
+            "StoppingWaitingToStop" => Ok(WorkloadSubStateEnum::StoppingWaitingToStop),
+            "StoppingRequestedAtRuntime" => Ok(WorkloadSubStateEnum::StoppingRequestedAtRuntime),
+            "StoppingDeleteFailed" => Ok(WorkloadSubStateEnum::StoppingDeleteFailed),
+            "SucceededOk" => Ok(WorkloadSubStateEnum::SucceededOk),
+            "FailedExecFailed" => Ok(WorkloadSubStateEnum::FailedExecFailed),
+            "FailedUnknown" => Ok(WorkloadSubStateEnum::FailedUnknown),
+            "FailedLost" => Ok(WorkloadSubStateEnum::FailedLost),
+            "NotScheduled" => Ok(WorkloadSubStateEnum::NotScheduled),
+            "Removed" => Ok(WorkloadSubStateEnum::Removed),
+            _ => Err(()),
+        }
+    }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// ######## ####### ######### ######### //
+// ## ## ## ## //
+// ## ##### ######### ## //
+// ## ## ## ## //
+// ## ####### ######### ## //
+//////////////////////////////////////////////////////////////////////////////
+
+#[cfg(test)]
+mod tests {
+    use std::str::FromStr;
+
+    use api::ank_base;
+
+    use super::{WorkloadStateEnum, WorkloadSubStateEnum};
+
+    #[test]
+    fn test_workload_state_enum_helpers() {
+        let state = WorkloadStateEnum::default();
+        assert!(WorkloadStateEnum::is_valid(0));
+        assert_eq!(WorkloadStateEnum::from_i32(0).unwrap(), state);
+    }
+
+    macro_rules!
generate_test_for_workload_state_enum { + ($test_name:ident, $enum_val:ident, $idx:expr) => { + #[test] + fn $test_name() { + let state = WorkloadStateEnum::$enum_val; + assert_eq!(state.as_i32(), $idx); + assert_eq!(state.to_string(), stringify!($enum_val)); + assert_eq!(state, WorkloadStateEnum::new_from_str(stringify!($enum_val))); + assert_eq!(state, stringify!($enum_val).parse().unwrap()); + } + }; + } + + generate_test_for_workload_state_enum!(test_workload_state_enum_agent_disconnected, AgentDisconnected, 0); + generate_test_for_workload_state_enum!(test_workload_state_enum_pending, Pending, 1); + generate_test_for_workload_state_enum!(test_workload_state_enum_running, Running, 2); + generate_test_for_workload_state_enum!(test_workload_state_enum_stopping, Stopping, 3); + generate_test_for_workload_state_enum!(test_workload_state_enum_succeeded, Succeeded, 4); + generate_test_for_workload_state_enum!(test_workload_state_enum_failed, Failed, 5); + generate_test_for_workload_state_enum!(test_workload_state_enum_not_scheduled, NotScheduled, 6); + generate_test_for_workload_state_enum!(test_workload_state_enum_removed, Removed, 7); + + #[test] + #[should_panic] + fn test_workload_state_str_invalid() { + assert!(WorkloadStateEnum::from_str(stringify!(Invalid)).is_err()); + WorkloadStateEnum::new_from_str("Invalid"); + } + + #[test] + fn test_workload_sub_state_enum_helpers() { + let substate = WorkloadSubStateEnum::default(); + assert_eq!(substate.to_i32(), 0); + assert_eq!(WorkloadSubStateEnum::from_i32(0).unwrap(), substate); + } + + macro_rules! generate_test_for_workload_state_enum { + ($test_name:ident, $enum_val:ident, $state_val:ident, $idx:expr) => { + #[test] + fn $test_name() { + let substate = WorkloadSubStateEnum::new(&WorkloadStateEnum::$state_val, $idx).unwrap(); + assert_eq!(substate.to_i32(), $idx); + assert_eq!(substate.to_string(), stringify!($enum_val)); + assert_eq!(substate, stringify!($enum_val).parse().unwrap()); + } + }; + } + + generate_test_for_workload_state_enum!(test_workload_substate_enum_agent_disconnected, + AgentDisconnected, AgentDisconnected, ank_base::AgentDisconnected::AgentDisconnected as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_pending_initial, + PendingInitial, Pending, ank_base::Pending::Initial as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_pending_waiting_to_start, + PendingWaitingToStart, Pending, ank_base::Pending::WaitingToStart as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_pending_starting, + PendingStarting, Pending, ank_base::Pending::Starting as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_pending_starting_failed, + PendingStartingFailed, Pending, ank_base::Pending::StartingFailed as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_running_ok, + RunningOk, Running, ank_base::Running::Ok as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_stopping, + Stopping, Stopping, ank_base::Stopping::Stopping as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_stopping_waiting_to_stop, + StoppingWaitingToStop, Stopping, ank_base::Stopping::WaitingToStop as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_stopping_requested_at_runtime, + StoppingRequestedAtRuntime, Stopping, ank_base::Stopping::RequestedAtRuntime as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_stopping_delete_failed, + 
StoppingDeleteFailed, Stopping, ank_base::Stopping::DeleteFailed as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_succeeded_ok, + SucceededOk, Succeeded, ank_base::Succeeded::Ok as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_failed_exec_failed, + FailedExecFailed, Failed, ank_base::Failed::ExecFailed as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_failed_unknown, + FailedUnknown, Failed, ank_base::Failed::Unknown as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_failed_lost, + FailedLost, Failed, ank_base::Failed::Lost as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_not_scheduled, + NotScheduled, NotScheduled, ank_base::NotScheduled::NotScheduled as i32); + generate_test_for_workload_state_enum!(test_workload_substate_enum_removed, + Removed, Removed, ank_base::Removed::Removed as i32); + + #[test] + fn test_workload_substate_enum_err() { + assert!(WorkloadSubStateEnum::new(&WorkloadStateEnum::AgentDisconnected, 20).is_err()); + assert!(WorkloadSubStateEnum::new(&WorkloadStateEnum::Pending, 20).is_err()); + assert!(WorkloadSubStateEnum::new(&WorkloadStateEnum::Running, 20).is_err()); + assert!(WorkloadSubStateEnum::new(&WorkloadStateEnum::Stopping, 20).is_err()); + assert!(WorkloadSubStateEnum::new(&WorkloadStateEnum::Succeeded, 20).is_err()); + assert!(WorkloadSubStateEnum::new(&WorkloadStateEnum::Failed, 20).is_err()); + assert!(WorkloadSubStateEnum::new(&WorkloadStateEnum::NotScheduled, 20).is_err()); + assert!(WorkloadSubStateEnum::new(&WorkloadStateEnum::Removed, 20).is_err()); + } + + #[test] + fn test_workload_substate_str_invalid() { + assert!(WorkloadSubStateEnum::from_str(stringify!(Invalid)).is_err()); + } +} \ No newline at end of file diff --git a/src/errors.rs b/src/errors.rs new file mode 100644 index 0000000..6e581bc --- /dev/null +++ b/src/errors.rs @@ -0,0 +1,38 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum AnkaiosError{ + #[error("IO Error: {0}")] + IoError(#[from] std::io::Error), + + #[error("Invalid value for field {0}: {1}.")] + WorkloadFieldError(String, String), + #[error("Workload builder error: {0}")] + WorkloadBuilderError(&'static str), + #[error("Invalid manifest: {0}")] + InvalidManifestError(String), + #[error("Connection closed: {0}")] + ConnectionClosedError(String), + #[error("Request error: {0}")] + RequestError(String), + #[error("Response error: {0}")] + ResponseError(String), + #[error("Control interface error: {0}")] + ControlInterfaceError(String), + #[error("Ankaios error: {0}")] + AnkaiosError(String) +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..2a9a6b2 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,25 @@ +// Copyright (c) 2024 Elektrobit Automotive GmbH +// +// This program and the accompanying materials are made available under the +// terms of the Apache License, Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0. +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +mod errors; +pub use errors::AnkaiosError; + +mod components; +pub use components::workload_mod::{Workload, WorkloadBuilder}; +pub use components::workload_state_mod::WorkloadStateCollection; +pub use components::manifest::Manifest; +pub use components::complete_state::CompleteState; + +mod ankaios; +pub use ankaios::Ankaios;
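For orientation, a minimal usage sketch of the API introduced by this change follows. It is not part of the diff: the crate name `ankaios_sdk` and direct access to the `api` crate (which provides `ank_base`) on the caller's side are assumptions, and only items re-exported in `src/lib.rs` above are relied on.

// Hypothetical consumer-side sketch; crate and dependency names are assumptions.
use ankaios_sdk::WorkloadStateCollection;

fn print_workload_states(proto: api::ank_base::WorkloadStatesMap) {
    // The TryFrom impl added above just flattens the nested
    // agent -> workload -> id map into the collection and never fails.
    let states = WorkloadStateCollection::try_from(proto)
        .expect("conversion from the proto map does not fail");

    // get_as_list() yields one entry per (agent, workload name, workload id)
    // tuple; the Display impl prints "<instance name>: <execution state>".
    for state in states.get_as_list() {
        println!("{}", state);
    }
}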