diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000000..6f83713f3e5bbf --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +.gitsecret/keys/random_seed +hidden.tar +hidden/ diff --git a/.gitsecret/keys/mapping.cfg b/.gitsecret/keys/mapping.cfg new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/.gitsecret/keys/pubring.kbx b/.gitsecret/keys/pubring.kbx new file mode 100644 index 00000000000000..11910e35e02a7d Binary files /dev/null and b/.gitsecret/keys/pubring.kbx differ diff --git a/.gitsecret/keys/pubring.kbx~ b/.gitsecret/keys/pubring.kbx~ new file mode 100644 index 00000000000000..4be7aa858229fb Binary files /dev/null and b/.gitsecret/keys/pubring.kbx~ differ diff --git a/.gitsecret/keys/trustdb.gpg b/.gitsecret/keys/trustdb.gpg new file mode 100644 index 00000000000000..0dd4d91764e6ea Binary files /dev/null and b/.gitsecret/keys/trustdb.gpg differ diff --git a/.gitsecret/paths/mapping.cfg b/.gitsecret/paths/mapping.cfg new file mode 100644 index 00000000000000..9cf08d9efdc2f7 --- /dev/null +++ b/.gitsecret/paths/mapping.cfg @@ -0,0 +1 @@ +hidden.tar: diff --git a/README.md b/README.md index 5e001a0f214b46..c927ee1e60c1ba 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,32 @@ -# Intel internal Zephyr CI -## *operated by your friendly IAGS/FMOS Zephyr DevOps team* -### mail PDL: FMOS_DevOps - -## [DevOps Documentation Wiki](https://gitlab.devtools.intel.com/zephyrproject-rtos/ci/-/wikis/home) -## [Zephyr DevOps Overview](https://intel-my.sharepoint.com/:p:/p/christopher_g_turner/EfZ2TF9ElydPjpGBEAKiUkwBiFt5LFBZPI2aGO_HZnP7Wg?e=Bxeeho) - -# CI Terminology -* **Jenkins** - A popular open-source CI/CD tool. Zephyr DevOps operates several Jenkins master instances at zerobot2, zerobot-stg & zephyr-ci. -* **Agent** - Jenkins terminology for a remote computer used for building, testing in a CI/CD pipeline. Any computer capable of executing commands over ssh can be a Jenkins agent. -* **Test-agent** - Jenkins agent that's configured for device-testing -* **Build-agent** - Jenkins agent that's intended for virtual (qemu-only) sanitycheck jobs, not device-testing -* **Test-head** - A dedicated server that provides services for a set of agents, via test-network. -* **Test-net** - Private network linking a Testhead with a set of agents, implemented as a star-topology around a Gigabit Ethernet switch -* **PXE Boot** - Also known as "network boot", this service allows a computer to receive it's operating system via a network link, removing the requirement for a hard-disk -* **PXE Boot Docker (pxeboot.docker)** - Zephyr DevOps-created tool that provides PXE boot services & automates generation of PXE boot initramfs payload -* **Workspace** - A build directory created by Jenkins for a specific job. Test-agents can have multiple workspaces, one for each job. - -# Architecture - -![Zephyr CI Block Diagram](zephyrCI-block-diagrams-WW08-2021.png "zephyr CI block diagram WW08 2021") +# Intel 1RTOS / Zephyr internal CI repo +*a catch-all for DevOps services & documentation* + +**Contact: email to: FMOS_DevOps, cc: Vondrachek, Chris & Graydon, Connor** + +## hidden.tar.secret & accessing the hidden/ directory +DevOps infrastructure secrets & private configuration data is stored encrypted as hidden.tar.secret with access controlled by a GPG keyring. + +### To reveal contents of hidden.tar.secret into hidden/ : +1. Your public GPG key must be enrolled in the keyring in this repo. +2. 
Use our automation script to decrypt hidden.tar.secret & decompress to hidden/ + + ````trusted-gpg-user@ci.git/ $ ./reveal-hidden.sh```` + +3. Access protected files at hidden/ +4. If any changes are made, you MUST run ./hide-hidden.sh to capture changes & encrypt + +### To hide the contents of hidden/ & stage hidden.tar.secret for commit: +1. Your public GPG key must be enrolled in the keyring in this repo. +2. Use our automation script to tar & encrypt hidden/, and also stage the change for commit: + + ````trusted-gpg-user@ci.git/ $ ./hide-hidden.sh```` + +3. Commit changes to hidden.tar.secret & push as usual + +## Useful Links +### [ci.git/docs](docs/) - more DevOps documentation +### [Zephyr DevOps Overview.pptx](https://intel-my.sharepoint.com/:p:/p/christopher_g_turner/EfZ2TF9ElydPjpGBEAKiUkwBiFt5LFBZPI2aGO_HZnP7Wg?e=Bxeeho) - Permalink for our most-often presented slide-deck + +# Block Diagram + +![Zephyr CI Block Diagram](zephyrCI-block-diagram-WW36-2021.png "zephyr CI block diagram WW36 2021")
diff --git a/allowlist/sanitycheck-master.allowFail b/allowlist/sanitycheck-master.allowFail deleted file mode 100644 index 3af80604dd2aa8..00000000000000 --- a/allowlist/sanitycheck-master.allowFail +++ /dev/null @@ -1,8 +0,0 @@ -# test-cases that CI skips to prevent hangs/known issues -SC_ALLOWED_TO_FAIL=( - "tests/application_development/libcxx/application_development.cpp.libcxx.exceptions" - "tests/application_development/cpp/application_development.cpp.main,posix,native_posix" - "tests/application_development/libcxx/application_development.cpp.libcxx,posix,native_posix" - "sample.display.sdl,posix,native_posix_64" - "tests/subsys/power/power_mgmt/subsys.power.device_pm,arm,qemu_cortex_m0" - )
diff --git a/allowlist/sanitycheck-v1.14-branch-intel.allowFail b/allowlist/sanitycheck-v1.14-branch-intel.allowFail deleted file mode 100644 index 4e192ed61cc1ec..00000000000000 --- a/allowlist/sanitycheck-v1.14-branch-intel.allowFail +++ /dev/null @@ -1,11 +0,0 @@ -# Sanitycheck test-cases that are allowed to fail without triggering CI run failure -# This is a simple bash array which is sourced into the -runner.sh & then used to -# delete testcases before starting a run -# -SC_ALLOWED_TO_FAIL=( -) - - -# Notes: -# -# (none)
diff --git a/docs/ci.wiki/DevOps-Engineering/Hypervisor-Operations.md b/docs/ci.wiki/DevOps-Engineering/Hypervisor-Operations.md new file mode 100644 index 00000000000000..375bf67d002959 --- /dev/null +++ b/docs/ci.wiki/DevOps-Engineering/Hypervisor-Operations.md @@ -0,0 +1,35 @@ +# Zephyr DevOps Hypervisor Operations + +## A. Summary + +DevOps operates a single VMware ESXi 6.7 hypervisor on jfsotc17 that is tasked with CI & test automation for Intel's internal Zephyr project efforts. + +## B. Accessing Hypervisor + +The hypervisor is not directly connected to the Intel intranet. To access it, you must be connected to our secured TestNet or use SSH tunneling to expose the https services on your local machine: + +From remote: +~~~~ +ssh -L 4430:192.168.0.254:443 zephyr-ci.jf.intel.com +https://127.0.0.1:4430 +~~~~ +From TestNet (direct connection in lab): +~~~~ +https://192.168.0.254:443 +~~~~ + +The hypervisor is also accessible via SSH from TestNet for CLI operations. + +### ACL + +The root account should not be used. + +User accounts for DevOps engineers are created manually. + +## C. VM Control ( power on/off, reset ) + +1. Notify users of reboot/downtime. If this is a production VM, clear the operation with FMOS_DevOps +2. Access ESXi UI per instructions in **B** above +3. 
Select the VM instance you'd like to control +4. Click the "Actions" gear & select the operation. If the option you require is grayed-out, contact FMOS_DevOps for permissions. +
diff --git a/docs/ci.wiki/DevOps-Engineering/Jenkins/Configuration-Spec.md b/docs/ci.wiki/DevOps-Engineering/Jenkins/Configuration-Spec.md new file mode 100644 index 00000000000000..d6d2621dafddd8 --- /dev/null +++ b/docs/ci.wiki/DevOps-Engineering/Jenkins/Configuration-Spec.md @@ -0,0 +1,71 @@ +# Zephyr DevOps Jenkins Configuration Specification +**Purpose** +This doc aims to serve as a standard operating procedure for deploying & configuring Jenkins for internal zephyr production use. + +**Target Audience** +DevOps Engineers + +**Doc Change Process** +* Minor changes & documentation improvements may be submitted by anyone. +* Major policy or configuration changes should be RFC'd @ FMOS_DevOps first. + +**NOTE THIS DOC IS WIP & CHANGE POLICY IS NOT ACTIVE** + +## Zephyr DevOps Jenkins Configuration Standard + +### 0. Jenkins Service Options + SSL Config + +#### Request/download .jks from https://certs.intel.com/aperture + +#### Edit /etc/default/jenkins: + + JENKINS_ARGS="--webroot=/var/cache/$NAME/war --httpPort=$HTTP_PORT --httpsPort=8443 --httpsKeyStore=/srv/jenkins/ssl/.intel.com.jks --httpsKeyStorePassword= + +#### Re-direct port 443 connections to 8443 + sudo iptables -I INPUT 1 -p tcp --dport 8443 -j ACCEPT + sudo iptables -I INPUT 1 -p tcp --dport 8080 -j ACCEPT + sudo iptables -I INPUT 1 -p tcp --dport 443 -j ACCEPT + sudo iptables -I INPUT 1 -p tcp --dport 80 -j ACCEPT + sudo iptables -A PREROUTING -t nat -i eth0 -p tcp --dport 80 -j REDIRECT --to-port 8080 + sudo iptables -A PREROUTING -t nat -i eth0 -p tcp --dport 443 -j REDIRECT --to-port 8443 + sudo apt-get install iptables-persistent + +### 1. System-Wide Environment Variables + +**Rule:** Don't use system-wide environment variables (those specified in the "Manage Jenkins" configuration). Env should always be handled in the pipeline code or job runners. + +**Exception:** Site or deployment specifics such as locale, for example: +~~~~ +LANG=en_US.UTF-8 +PYTHONIOENCODING=UTF-8 +LANGUAGE=en_US:en +LC_ALL=en_US.UTF-8 +~~~~ + +### 2. Plugins + +**Rule:** In order to reduce DevOps cycles required for updates & overall CI execution risk, only install plug-ins from the [approved list](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/ansible-playbooks/-/blob/current/jenkins-plugins.yaml) + +### 3. Users/Security + +**Option 1 - IT VAS + Jenkins PAM** + +If Jenkins is executing on a system with functional IT VAS, select 'Unix user/group database' as the Jenkins Security Realm. This will restrict logins to accounts in /etc/passwd. This means that users wishing to access the Jenkins UI must have first logged in over SSH for VAS to pick up their idsid & create a /etc/passwd entry. + +**Option 2 - IT SAML** + +TBD... + +### 4. Jenkins Job Statuses + +Jenkins implements status conditions to represent the global status of a build: SUCCESS, UNSTABLE, FAILURE, NOT_BUILT or ABORTED. Zephyr DevOps maps these statuses to Zephyr CI/Automation jobs as follows: + +**SUCCESS** - All tasks defined by the job executed & returned success exit codes. + +**UNSTABLE** - All tasks defined by the job executed but at least one step returned a non-zero exit code. + +**FAILURE** - At least one task defined by the job failed to execute. + +**ABORTED** - Job was aborted either by a user or a timeout. + +**NOT_BUILT** - Unused currently. 
\ No newline at end of file
diff --git a/docs/ci.wiki/DevOps-Engineering/Jenkins/Gitlab-Plugin.md b/docs/ci.wiki/DevOps-Engineering/Jenkins/Gitlab-Plugin.md new file mode 100644 index 00000000000000..b59c00ea8cabad --- /dev/null +++ b/docs/ci.wiki/DevOps-Engineering/Jenkins/Gitlab-Plugin.md @@ -0,0 +1,42 @@ +# Zephyr DevOps Gitlab Plugin Info & Configuration + +**Purpose** +This doc provides background information & configuration guidance for implementing Jenkins-Gitlab CI leveraging the gitlab plugin. + +**Target Audience** +DevOps Engineers + +**Doc Change Process** +* Minor changes & documentation improvements may be submitted by anyone. +* Major policy or configuration changes should be RFC'd @ FMOS_DevOps first. + +## 1. Gitlab Plugin Parameters & Jenkins Jobs + +RTM @ https://plugins.jenkins.io/gitlab-plugin/#parameter-configuration + +When a merge-request is opened on a project configured with Jenkins integration enabled, gitlab automatically transmits variables that specify the merge source repo & branch in the JSON webhook payload. DevOps also operates manually triggered jobs that allow user-provided parameters via the "Build with Parameters" option in Jenkins. + +In order for these methods to coexist in the same job, we must support different paths for manual & automated execution: +1.) automated trigger via the gitlab plugin, with gitlabSourceBranch & gitlabSourceRepoSshUrl vars provided. +2.) manual trigger w/ user-provided srcBranch & srcRepo from the "Build with Parameters" function. + +When the MRV starts, it first populates vars from the job parameters into srcRepo + srcBranch: +~~~~ +//default to override values from Jenkins Job "Build with Parameters" dialog +def srcRepo="${env.overrideSourceRepo}" +def srcBranch="${env.overrideSourceBranch}" +~~~~ + +Then we check for the gitlab… vars & if they were set by a plugin trigger, use those instead: + +~~~~ +//now override with gitlab-webhook supplied values, if they exist +if (env.gitlabSourceBranch) +{ + echo "Triggered by gitlab merge-request webhook" + srcBranch="${env.gitlabSourceBranch}" + srcRepo="${env.gitlabSourceRepoSshUrl}" +} +~~~~ + +At this point, the job can continue with srcBranch & srcRepo set correctly for either manual or automated triggers. \ No newline at end of file
diff --git a/docs/ci.wiki/DevOps-Engineering/Python-Dependencies.md b/docs/ci.wiki/DevOps-Engineering/Python-Dependencies.md new file mode 100644 index 00000000000000..14807b23da1c24 --- /dev/null +++ b/docs/ci.wiki/DevOps-Engineering/Python-Dependencies.md @@ -0,0 +1,34 @@ +# Zephyr DevOps Python Dependency Method +**Purpose** +This doc describes how Python dependencies are managed on DevOps infrastructure. + +**Target Audience** +DevOps Engineers + +**Doc Change Process** +* Minor changes & documentation improvements may be submitted by anyone. +* Major policy or configuration changes should be RFC'd @ FMOS_DevOps first. +## Overview + +Zephyr DevOps maintains separate Python dependency sets for each Zephyr build-environment. For example: + +v1.14-branch - west 0.6.3, cmake 13.3, located at /usr/local_v1.14-branch + +v2.5-branch - west , cmake , located at /usr/local_v2.5-branch + +master - west , cmake , located at /usr/local_master + + +## Quick-start: Python dep install/update on DevOps VMs + +**0.** For production, schedule down-time for the VMs that you wish to update. For staging, simply clear update plans with other DevOps engineers via email or Teams chat. + +**1.** Confirm the target environment is free of any existing Python packages installed under /usr/local. 
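A quick way to check this (a hedged sketch, assuming a stock Ubuntu layout where a stray 'sudo pip3 install' drops packages into /usr/local/lib/python3.*/dist-packages - adjust the paths for your image):
~~~~
# any output here means stray pip packages are present under /usr/local & should be removed first
ls /usr/local/lib/python3*/dist-packages/ 2>/dev/null

# west should resolve to the branch-specific dependency set, not a sudo-pip3 copy under /usr/local/bin
command -v west
~~~~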
+ +**2.** Run ansible playbook [nativeBuild02-pythonDeps.yaml](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/ansible-playbooks/-/blob/current/nativeBuild02-pythonDeps.yaml) with *'--limit=target.machine.intel.com'* to restrict actions to a single host. + +## Troubleshooting + +### West fails on "import west.main" + +This most often occurs on the Jenkins instances, where users are likely to run 'sudo pip3 install ', which results in packages being installed under /usr/local & thus conflicting with the per-branch package sets described above. Make sure no dependencies are installed under /usr/local before running the playbook. \ No newline at end of file
diff --git a/docs/ci.wiki/DevOps-Engineering/Remote-Hardware.md b/docs/ci.wiki/DevOps-Engineering/Remote-Hardware.md new file mode 100644 index 00000000000000..ca5b7765afe199 --- /dev/null +++ b/docs/ci.wiki/DevOps-Engineering/Remote-Hardware.md @@ -0,0 +1,206 @@ +# Zephyr DevOps Remote Hardware Documentation + +## Intro +This documentation covers DevOps' automated hardware-sharing system in JF1, typically referred to as "Remote Hardware" or "RemoteHW" for short. +RemoteHW was created to enable global access to our limited pre-production x86 targets, while also providing a standard, DevOps-maintained interface for interacting with Zephyr test-devices, both for developers at the command-line & for upstream automation. + +The service allows any Intel employee to: +* Access Zephyr DevOps-managed "zephyrtest" VMs with USB I/O for device-testing operations. +* Reserve x86 Zephyr test targets for exclusive use +* Execute "remotehw-" to control power, emulate USB devices & connect to target UART + +RemoteHW is built around [BeagleBoneBlack](https://beagleboard.org/black) (BBB) open-source maker-boards that provide USB device emulation via g_mass_storage + other low-latency I/O to the device-under-test (DUT). + +## Architecture + +RemoteHW ingredients: +1. Ubuntu 18.04 virtual-machines, each with dedicated USB cards & dedicated to remoteHW functions. Periodic snapshot reset. +1. Network power-switches (aka "PDLs") +1. BeagleBoneBlack - one per target, configured as "USB TTA" per DevOps procedure. +1. RemoteHW "code", exposed as shell env functions sourced from /etc/profile.d/remotehw* at login +1. A rack of x86 Zephyr targets in JF1-2 OISA lab + +## Status & Known-Issues + +RemoteHW is currently in beta - all required commands are supported & we're now focusing on features to simplify sustaining this service. + +## Usage + +RemoteHW commands are issued from the Linux command-line, either directly by a user or through Jenkins automation. Systems connected to RemoteHW are controlled via shell commands. + +Here's a list of commands that are currently supported as well as features we expect to add in the near future. 
+ +### RemoteHW Commands + +| **released commands** | description | |-----------------------|-------------| | remotehw-- **reserve** | reserve target | | remotehw-- **release** | release target reservation, also closes picocom sessions started by **get-console** | | remotehw-- **rsvrst** | reset reservation & close picocom sessions, even if owned by another user | | remotehw-- **power-on** | enables AC-power to target system | | remotehw-- **power-off** | disables AC-power to target system | | remotehw-- **usb-efi** | creates an EFI boot-disk with zephyr.efi & attaches to target system | | remotehw-- **usb-grub** | creates a grub boot-disk with zephyr.elf as multiboot target & attaches to target system | | remotehw-- **usb-sbl** | creates SlimBootLoader payload disk & injects sbl_os into /boot directory & attaches to target system | | remotehw-- **usb-acrn** (zephyr.bin) (grub.cfg) | creates acrn boot disk from [acrn-binaries.zip](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/ansible-playbooks/-/blob/latest/remotehw/opt/acrn-binaries.zip). Overrides zephyr.bin & grub.cfg if optional arguments are supplied. | | remotehw-- **usb-get-p1** | disconnects emulated USB disk from target & opens ssh connection to the USB TTA. Files can be manipulated under /mnt/loop. Disk is reconnected to target when the user exits the USB TTA ssh session. | | remotehw-- **get-console** | opens terminal session to configured tty for target system | | remotehw-- **get-tty** | returns string for configured tty device, eg /dev/ttyUSB6 | + +| **development features** | description | ETA | |--------------------------------|-------------|-----| | remotehw-- **status** | get target status | Z11 | + +### RemoteHW Features In-Development + +* GPIO to target: available but lacks published methods & documentation +* PXE boot: possible but not planned yet +* Snapshot & rollback are not yet automated - rollback is only triggered when requested. + +### Known Issues & Bugs + +1. **Commands that take a file as an argument will fail silently if the file specified by the argument does not exist or is not accessible.** + +## Getting Help +**Source Code** + +RemoteHW is implemented by env functions sourced from /etc/profile.d/remotehw* when you log in to DevOps infrastructure. These scripts are managed by ansible, our configuration-management tool; however, users are free to copy the scripts from /etc/profile.d into their local env & modify the functionality as needed. + +* [remotehw env functions](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/ansible-playbooks/-/tree/master/remotehw) +* [ansible playbook to deploy remotehw env](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/ansible-playbooks/-/blob/master/ubuntu18_PRD02-remotehw.yaml) + +**Email** + +[Email FMOS DevOps PDL](mailto:fmos.devops@intel.com?subject=DevOps%20RemoteHW%20Question) if you have any questions, issues or feature requests. + +## Quick start + +#### 1. Get access to our remotehw infrastructure. + +Access to DevOps infrastructure is controlled by a [YAML file](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/ansible-playbooks/-/blob/current/acl-remotehw.yaml) in git. If you see your idsid in the list, you should have access. + +**Options for requesting access:** +1. 
If you have a gitlab account, edit (or submit a cmd-line merge) [remotehw-acl.yaml](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/ansible-playbooks/-/blob/current/acl-remotehw.yaml), adding your domain/idsid to the appropriate groups. + +This will automatically create a merge-request that DevOps will review & approve or deny. +Approved ACL changes are applied by this [ansible playbook](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/ansible-playbooks/-/blob/current/ubuntu18_PRD01-acl.yaml), which is run from a jenkins job triggered on changes. + +1. [Email FMOS DevOps PDL](mailto:fmos.devops@intel.com?subject=DevOps%20ACL%20Request) with a list of machines or functions and we'll process an acl-remotehw.yaml merge for you. + +#### 2. Select remoteHW VM & connect + +Select the remoteHW VM based on the device you'd like to access: + +|**zephyrtest-blue.jf.intel.com**| | |-----------------------------|----| | ehlsku7 | EHL CRB, SKU7 | | ehlsku11 | EHL CRB, SKU11 | | tglchr01 | (**disabled until WW04**) TGL-U 4+2 Chrome (power on/off + console only) | | minnow01 | Minnowboard | + + | **zephyrtest-orange.jf.intel.com** (staging) || |----------------------------|-----| | upx01 | (**disabled until WW04, devicetree issues**) Up Extreme | | (avail) || | (avail) || | (avail) || + ``` $ ssh zephyrtest-.jf.intel.com ``` #### 3. Reserve system Example, EHL SKU7: ``` # reserve system user@zephyrtest-blue$ remotehw-ehlsku7-reserve + .remotehw-reserve (70afd76) * ehlsku7 is available. Setting owner to user. Done. System reserved. ``` + #### 4. Boot arbitrary zephyr.efi via emulated USB flash-disk Example, EHL SKU7: ``` # power-off system first user@zephyrtest-blue$ remotehw-ehlsku7-power-off + .remotehw-power-off (70afd76) * sending power-off command to pwrswitch-blue.testnet/outlet?3 Done. Target powered-off. + # create & attach usb flash-disk using zephyr.efi user@zephyrtest-blue$ remotehw-ehlsku7-usb-efi zephyr.efi + .remotehw-usb-efi (70afd76) * resetting usbtta state * creating new boot disk filesystem * deploying zephyr.efi to usbtta * attaching completed disk image to target Done. USB disk attached and target ready for power-on. + # power-on system user@zephyrtest-blue$ remotehw-ehlsku7-power-on + .remotehw-power-on (70afd76) * sending power-on command to pwrswitch-blue.testnet/outlet?3 Done. Target powered-on. + # get console to system user@zephyrtest-blue$ remotehw-ehlsku7-get-console + ... + # ctrl-x + ctrl-a to exit ``` + #### 5. Power-off system & release reservation Example, EHL SKU7: ``` # power-off system user@zephyrtest-blue$ remotehw-ehlsku7-power-off + .remotehw-power-off (70afd76) * sending power-off command to pwrswitch-blue.testnet/outlet?3 Done. Target powered-off. + # release reservation & kill any console sessions user@zephyrtest-blue$ remotehw-ehlsku7-release + .remotehw-release (70afd76) * ehlsku7 owner is currently user, releasing reservation & killing console sessions. ``` ## USB Test-Target Adapters (USB TTA) + +For USB device emulation + other low-latency I/O to the device-under-test (DUT), +Zephyr DevOps has deployed an array of [BeagleBoneBlack](https://beagleboard.org/black) (BBB) open-source maker-boards +flashed with a [custom](https://github.com/cvondratek/usb-boot-adapter.bcbprj) [yocto](https://git.yoctoproject.org/cgit/cgit.cgi/meta-ti/) build that enables linux g_mass_storage and squashfs root in RAM. 
+ +The USB TTAs run a 4.15+ LTS kernel and have a lightweight dropbear+busybox console environment that can run automation scripts, etc. + +### Interacting with USB TTAs + + To demonstrate USB TTA debug & automation capabilities, here's an annotated + list of commands one might use to manually modify the emulated USB flash-disk + contents. ``` # initialize the USB disk with the acrn-binaries.zip payload remotehw--usb-acrn # connect to usb-tta for your system, note dot prefix on this command .remotehw--get-tta # fyi: you are now logged into a yocto image running on a BeagleBoneBlack. Fun! # unload the g_mass_storage USB driver, this will also disconnect the USB flash disk from the test system modprobe -r g_mass_storage # the disk image is stored at /tmp/zephyr.disk as a 64MB linear block that emulates a flash-device, # we need to index into the flash device to locate the file-system partition using losetup & mount it: losetup -P /dev/loop0 /tmp/zephyr.disk mount /dev/loop0p1 /mnt/loop # Disk is now mounted to the USB TTA & you can access the files on the boot disk at /mnt/loop # USB TTAs run a stripped-down yocto build with bash, vi, etc. # When finished making changes, umount the disk, disconnect the loop device & restart the usb driver umount /mnt/loop losetup -d /dev/loop0 modprobe g_mass_storage file=/tmp/zephyr.disk ro=y iManufacturer=zephyrdevops iProduct=FlashKey iSerialNumber=1234 # the emulated disk is now connected to the target system ``` \ No newline at end of file
diff --git a/docs/ci.wiki/DevOps-Engineering/Test-Agent-Processes.md b/docs/ci.wiki/DevOps-Engineering/Test-Agent-Processes.md new file mode 100644 index 00000000000000..53b486519f3ba7 --- /dev/null +++ b/docs/ci.wiki/DevOps-Engineering/Test-Agent-Processes.md @@ -0,0 +1,47 @@ +# DevOps process for provisioning a new NUC for test-agent use + +1. Install RAM (2x 32GB) +1. Connect keyboard, mouse & monitor to NUC, then power-on + * Initial power-on may take up to 1 minute while memory is detected & calibrated + * Confirm detected memory capacity is "63G" in BIOS setup screen +1. Update BIOS to latest + * Download BExxxx.bio file (for NUC8i5BEK: https://downloadcenter.intel.com/download/29282/BIOS-Update-BECFL357-86A-?product=126147) + * Copy .bio file to USB disk & insert in NUC USB port + * Hit F7 from BIOS screen, browse to fs0: and select BExxxx.bio file for update + * Update will take 2-3 minutes. The system will reboot when complete. +1. Configure NUC BIOS settings as follows: + * Hit F9 to load BIOS defaults & confirm + * Click "Advanced" button & configure sub-sections as follows: + * Boot->Boot Configuration + * Check "Unlimited Boot to Network Attempts" + * Uncheck "USB", "Thunderbolt" and "Optical" under Boot Devices + * Boot->Secure Boot + * Uncheck "Secure Boot" + * Power + * Change "After Power Failure" to "Power On" + * "Cooling" + * Change "Fan-Control Mode" to "Cool" + * Hit F10 to save settings & exit +1. Install into HW Test & confirm NUC powers-on when plugged-in (without a button press) +1. Provision NUC MAC-address in TestHead pxeboot config to allow it to fetch an OS & boot + * Link: TBD + +# Process for removing a test-agent NUC from the test-network + +To minimize the chance for a CI or QA automation outage, please use the following process to add/remove test-agents to/from the zephyr QA/CI test-network: + +## Removing a test-agent: + +1. If possible, notify FMOS_DevOps BEFORE removing a test-agent from the test-network. 
DevOps can remove the NUC from CI inventory so that jobs are not being sent to the NUC while it's off the network. +1. Power-off the test-agent system. Test-agents are stateless & do not need to be shut down; it's ok to disconnect power with the power-switch or unplug. +1. Disconnect the network cable. Use care with the NUC network jacks; they can be damaged by forceful removal of the RJ-45 connector. + +## Adding a test-agent/NUC: + +1. If the test-agent is new, please STOP & complete the setup process first [Test Agent Setup Process.md] +1. If the test-agent already exists but the number or type of connected DUTs (boards) has changed, STOP. Please contact DevOps to reprovision the test-agent. Have the MAC address, power-switch number + types of boards connected to expedite service. +**Please submit an MR to https://gitlab.devtools.intel.com/zephyrproject-rtos/ci/-/blob/latest/hwtest/dut.map** +1. [Re]connect DUT/boards to the test-agent if they were removed. +1. Plug the NUC into the network. Be sure to use a CAT-5E or CAT-6 (preferred) network cable in good condition +1. Plug the NUC power-supply into its provisioned power-switch outlet +1. NUC should power-on & boot without pressing the power-button
diff --git a/docs/ci.wiki/DevOps-Engineering/Test-Head-Processes.md b/docs/ci.wiki/DevOps-Engineering/Test-Head-Processes.md new file mode 100644 index 00000000000000..f3dc3ee54b2cc5 --- /dev/null +++ b/docs/ci.wiki/DevOps-Engineering/Test-Head-Processes.md @@ -0,0 +1,45 @@ +# Zephyr DevOps Test-Head Operations + +## **Links:** +### **[zephyr-testhead.git](https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/zephyr-testhead)** + +## Frequently Asked Questions +### What services does the Test-head provide? +* iPXE boot server for all downstream test-agents connected to the Test-net +* dnsmasq providing DHCP/BOOTP + tftp services +* apache2 providing http services for rapid kernel + rootfs transfer +* kernel: intel-lts-linux, latest release, controlled defconfig +* initramfs: ubuntu-base-latest (20.04 currently) + zephyr build dependencies + +### How does Jenkins communicate & authenticate with the test-agents? +All Jenkins-Agent communication occurs via Jenkins Remoting, over ssh. Authentication is via a single SSH key that is generated by the pxeboot service when it is installed on the Testhead. + +### If the agents are confined to the private test net, how does Jenkins connect to the agents? +The Testhead implements an ssh port-forward through the test firewall, one port for each agent. For example: + external port 22220 -> private 192.168.0.220:22 + external port 22221 -> private 192.168.0.221:22 + ... + +### Can I login to the agents directly? +Yes, this is an expected task for DevOps & QA engineers for debugging. You will need the SSH key for root@ from the test-head, located at . To login: + ssh -i root@ + +### Where are my build files on the test-agent? +Jenkins remoting uses a $JENKINS_HOME/$WORKSPACE/$JOB_NAME convention for all jobs on the test-agents. Currently $JENKINS_HOME is /jenkins for all Zephyr DevOps test & build agents. + +### How often do the test-agents power-on or reboot? +It depends; the test-agents are designed to be powered-off when they're not used & powered-on before a job so that DUTs are tested in a freshly powered-on state. + +Note: As of WW08'2021, we do not force agents to power-down after a job so that job state is maintained for debugging purposes. 
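To make the port-forward + key answer above concrete, here is a hedged example of reaching the agent at 192.168.0.220 from outside TestNet (the Testhead hostname & key path below are placeholders, not the actual deployment values):

    ssh -i <path-to-testhead-root-key> -p 22220 root@<testhead-hostname>

Port 22220 maps to 192.168.0.220:22 per the forwarding table above; substitute the forwarded port for the agent you need.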
+ +### How do agents register with the Jenkins master? +Currently, we do this manually, entering the port #, agent name & labels by hand. This operation is only required when a new agent is added or its DUT inventory is changed. + +DevOps is developing an automated process that uses an ansible inventory file to specify the agent-to-DUT mapping using a simple array of DUT types, ETA Z03'2020. + +### How is DUT inventory mapped to agents? +We use Jenkins agent labels to indicate which DUTs are connected to a specific test-agent. Jenkins allows multiple labels per agent, for example: + nuc_64gb-jf # label for a generic NUC in jf (Jones Farm) with 64GB of RAM suitable for virtual sanitycheck jobs + hwtest-jf-frdm_k64f # label for a hwtest agent in JF with a frdm_k64f zephyr DUT attached. + +Each test-agent is listed by IP address in an ansible inventory file that also specifies an array of Zephyr-project DUTs that are attached. When the ansible playbook is run at agent boot, this array is read and the corresponding agent labels are applied automatically.
diff --git a/docs/ci.wiki/DevOps-Engineering/Virtual-Infrastructure.md b/docs/ci.wiki/DevOps-Engineering/Virtual-Infrastructure.md new file mode 100644 index 00000000000000..d18dedc2fc3023 --- /dev/null +++ b/docs/ci.wiki/DevOps-Engineering/Virtual-Infrastructure.md @@ -0,0 +1,14 @@ +# Zephyr DevOps Virtual Infrastructure + +**Purpose:** Specifications & Operations Information for DevOps-operated hypervisors & virtual-machines. + +**Target Audience:** DevOps Engineers + +**Doc Change Process** +* Minor changes & documentation improvements may be submitted by anyone. +* Major policy or configuration changes should be RFC'd @ FMOS_DevOps first. +## Overview & specifications +Specifications for DevOps hypervisors & VMs are currently tracked in **[Zephyr-DevOps-Infrastructure-Spec.xlsx](https://intel-my.sharepoint.com/:x:/p/chris_vondrachek/EaB7UZ_xwOBJlL0bR63iPqQBssytzWhsyXn0jZ31hosxvg?e=ve6szU)**. + +In the near future, the Excel content will be transferred into this wiki page. +
diff --git a/docs/ci.wiki/Overview/Internal-CI-Process.md b/docs/ci.wiki/Overview/Internal-CI-Process.md new file mode 100644 index 00000000000000..7412cbad6218a4 --- /dev/null +++ b/docs/ci.wiki/Overview/Internal-CI-Process.md @@ -0,0 +1 @@ +todo: insert diagrams & description of CI process \ No newline at end of file
diff --git a/docs/ci.wiki/README.md b/docs/ci.wiki/README.md new file mode 100644 index 00000000000000..a7f499fac37596 --- /dev/null +++ b/docs/ci.wiki/README.md @@ -0,0 +1,95 @@ +# Zephyr DevOps Documentation Wiki +**Purpose:** Official documentation wiki for Zephyr DevOps services & internal processes. + +**Target Audience:** Intel Zephyr developers & users of Zephyr DevOps services. + +**Document Owner:** Zephyr DevOps + +**Change Process:** +* Minor edits & improvements ok without approval. +* RFC to FMOS_DevOps for all other changes. + +## DevOps Service Links + +### CI + +We use Jenkins for CI/CD automation. Our main instance is accessible to anyone inside of Intel, here: **https://zephyr-ci.ostc.intel.com**. We also operate a staging instance at **https://zephyr-ci.jf.intel.com:8080**. + +### Remote Hardware (RemoteHW) + +DevOps operates a remote hardware sharing-system in the JF1-2 lab (US/Oregon). +Zephyr targets are placed on shelves in our test-rack with network power control, network-access & I/O pass-through. +Other capabilities such as power-measurement or wireless network can be enabled as well. 
+ +For additional information & source code, please see **[Remote Hardware](Remote Hardware.md)**. + +### SDK docker + +We maintain an internal fork of the Zephyr project SDK docker configured for use within the Intel intranet. +See: + +**https://gitlab.devtools.intel.com/zephyrproject-rtos/devops/infrastructure/sdk-docker-intel/-/tree/intel** + +### Infrastructure & Systems + +### TestNet (.testnet) + +DevOps maintains a private test network for all HW test automation & operations. Most VMs in our cluster have access to this network via a secondary network interface with an address in 192.168.0.0/24. See **[DevOps Virtual Infrastructure](DevOps Virtual Infrastructure.md)** for more info. + +#### SSP Ops VMs (*.ostc.intel.com) + +DevOps production CI services are currently hosted on VMs provided by SSP-Ops but we expect to leave their support umbrella around WW08 2021. This section will be removed in the near future. + +#### DevOps VMs + +The following VMs are deployed to the VMware ESXi hypervisor in the JF1-2 lab. + +**zephyr-ci.jf.intel.com** - new Jenkins CI main, under construction + +**zephyr-zabbix.jf.intel.com** - Zabbix systems-monitoring instance + +**zephyrtest-orange.jf.intel.com** - remoteHW host, DevOps use + +**zephyrtest-blue.jf.intel.com** - remoteHW host, EHL + TGRVP + +**fresno.jf.intel.com** - DevOps use + +Backend Service VMs (accessible only from within TestNet) + +**nas.zephyr-testnet** - freeNAS VM serving 2TB of SSD RAID + +**gw.zephyr-testnet** - pfsense gateway for TestNet + +**zbuild{01..06}.testnet** - CI build agents + +#### Physical Systems + +**zephyr-ci-th01.jf.intel.com** - 1U server, implements "TestHead" function in JF + +**zephyr-ci-th02.jf.intel.com** - 1U server, implements "TestHead" function in SH (once installed) + +### git services + +#### zephyrproject-rtos@gitlab.devtools + +DevOps administers the IT-provided gitlab project for all Intel-internal Zephyr development: + +https://gitlab.devtools.intel.com/zephyrproject-rtos + +#### git cache (gitlab container) + +**todo:** new gitlab-container url + +#### zephyr devops teamforge + +Teamforge repo with CI keys & credentials (DevOps only) + +https://tf-amr-1.devtools.intel.com/sf/projects/zdevops/ + +### Hardware Test (HWTest & TestNet) +**todo:** condense docs & link here + +### SWLC/SDL +**todo:** link to local docs + +
diff --git a/docs/ci.wiki/References-&-Learning/Docker-&-Containers.md b/docs/ci.wiki/References-&-Learning/Docker-&-Containers.md new file mode 100644 index 00000000000000..14ad6880093a59 --- /dev/null +++ b/docs/ci.wiki/References-&-Learning/Docker-&-Containers.md @@ -0,0 +1,5 @@ +A collection of resources for learning about Docker & container OS-virtualization in-general: +* https://www.docker.com/blog/docker-101-getting-to-know-docker/ +* https://en.wikipedia.org/wiki/Docker_(software) +* https://github.com/zephyrproject-rtos/docker-image +* https://docs.docker.com/engine/reference/builder/
diff --git a/hidden.tar.secret b/hidden.tar.secret new file mode 100644 index 00000000000000..49915915faecf3 Binary files /dev/null and b/hidden.tar.secret differ
diff --git a/hide-hidden.sh b/hide-hidden.sh new file mode 100755 index 00000000000000..f97fc1acdd4f83 --- /dev/null +++ b/hide-hidden.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# +# this script tars the contents of ./hidden, runs git secret add, and stages an encrypted hidden.tar.secret for git commit. 
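+# (illustrative note, not part of the original script: enrolling a new trusted identity
+#  is done by an existing key-holder using standard git-secret commands, roughly:
+#    gpg --import new-user-public-key.asc
+#    git secret tell new-user@intel.com
+#    ./reveal-hidden.sh && ./hide-hidden.sh   # re-encrypt so the new key can decrypt
+#  the email address & key file name above are placeholders)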
+# +# -> user running this script must have trusted gpg identity enrolled in git-secret + +export PATH=/usr/local/bin:$PATH + +if [ -f hidden.tar.secret ]; then + echo hidden.tar.secret already exists, refusing to overwrite. + exit; +fi + +if [ -d hidden/ ]; then + tar -czf hidden.tar hidden/ && git secret hide && git add hidden.tar.secret && rm -f hidden.tar && rm -rf hidden/ +else + echo hidden/ is missing, try reveal-hidden.sh first + exit; +fi diff --git a/reveal-hidden.sh b/reveal-hidden.sh new file mode 100755 index 00000000000000..48ffd106ec5a62 --- /dev/null +++ b/reveal-hidden.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# +# script to reveal contents of hidden directory using git-secret +# +# -> user running this script must have trusted gpg identity enrolled in git-secret + +export PATH=/usr/local/bin:$PATH + +if [ -d hidden/ ]; then + echo hidden/ already exists, refusing to overwrite. + exit; +fi + +git secret reveal && tar -xzf hidden.tar && rm -f hidden.* diff --git a/coverity/README.md b/src/coverity/README.md similarity index 100% rename from coverity/README.md rename to src/coverity/README.md diff --git a/coverity/coverity-scans-1.14-branch-intel/coverity-scan-1.14-branch-intel.sh b/src/coverity/coverity-scans-1.14-branch-intel/coverity-scan-1.14-branch-intel.sh similarity index 100% rename from coverity/coverity-scans-1.14-branch-intel/coverity-scan-1.14-branch-intel.sh rename to src/coverity/coverity-scans-1.14-branch-intel/coverity-scan-1.14-branch-intel.sh diff --git a/coverity/external_coverity_scans/coverity-open-github-issues.py b/src/coverity/external_coverity_scans/coverity-open-github-issues.py similarity index 100% rename from coverity/external_coverity_scans/coverity-open-github-issues.py rename to src/coverity/external_coverity_scans/coverity-open-github-issues.py diff --git a/coverity/external_coverity_scans/coverity-run-ci.sh b/src/coverity/external_coverity_scans/coverity-run-ci.sh similarity index 100% rename from coverity/external_coverity_scans/coverity-run-ci.sh rename to src/coverity/external_coverity_scans/coverity-run-ci.sh diff --git a/coverity/external_coverity_scans/run-cov.sh b/src/coverity/external_coverity_scans/run-cov.sh similarity index 100% rename from coverity/external_coverity_scans/run-cov.sh rename to src/coverity/external_coverity_scans/run-cov.sh diff --git a/coverity/misra_c_scans/MISRA.config b/src/coverity/misra_c_scans/MISRA.config similarity index 100% rename from coverity/misra_c_scans/MISRA.config rename to src/coverity/misra_c_scans/MISRA.config diff --git a/coverity/misra_c_scans/coverity_misra_c_scans.sh b/src/coverity/misra_c_scans/coverity_misra_c_scans.sh similarity index 100% rename from coverity/misra_c_scans/coverity_misra_c_scans.sh rename to src/coverity/misra_c_scans/coverity_misra_c_scans.sh diff --git a/hwtest/README.md b/src/hwtest/README.md similarity index 100% rename from hwtest/README.md rename to src/hwtest/README.md diff --git a/hwtest/dut.map b/src/hwtest/dut.map similarity index 100% rename from hwtest/dut.map rename to src/hwtest/dut.map diff --git a/hwtest/get-tty.sh b/src/hwtest/get-tty.sh similarity index 100% rename from hwtest/get-tty.sh rename to src/hwtest/get-tty.sh diff --git a/hwtest/runner.sh b/src/hwtest/runner.sh similarity index 100% rename from hwtest/runner.sh rename to src/hwtest/runner.sh diff --git a/stateless/README.md b/src/stateless/README.md similarity index 100% rename from stateless/README.md rename to src/stateless/README.md diff --git a/stateless/pipeline.groovy 
b/src/stateless/pipeline.groovy similarity index 100% rename from stateless/pipeline.groovy rename to src/stateless/pipeline.groovy diff --git a/stateless/runner.sh b/src/stateless/runner.sh similarity index 100% rename from stateless/runner.sh rename to src/stateless/runner.sh diff --git a/swlc/check-whitelist-status/README.txt b/src/swlc/check-whitelist-status/README.txt similarity index 100% rename from swlc/check-whitelist-status/README.txt rename to src/swlc/check-whitelist-status/README.txt diff --git a/swlc/check-whitelist-status/check_whitelist_status.py b/src/swlc/check-whitelist-status/check_whitelist_status.py similarity index 100% rename from swlc/check-whitelist-status/check_whitelist_status.py rename to src/swlc/check-whitelist-status/check_whitelist_status.py diff --git a/swlc/check-whitelist-status/open_source_projects.txt b/src/swlc/check-whitelist-status/open_source_projects.txt similarity index 100% rename from swlc/check-whitelist-status/open_source_projects.txt rename to src/swlc/check-whitelist-status/open_source_projects.txt diff --git a/swlc/check-whitelist-status/requirements.txt b/src/swlc/check-whitelist-status/requirements.txt similarity index 100% rename from swlc/check-whitelist-status/requirements.txt rename to src/swlc/check-whitelist-status/requirements.txt diff --git a/utils/README.md b/src/utils/README.md similarity index 100% rename from utils/README.md rename to src/utils/README.md diff --git a/utils/branch-detect.groovy b/src/utils/branch-detect.groovy similarity index 100% rename from utils/branch-detect.groovy rename to src/utils/branch-detect.groovy diff --git a/utils/get_failed.py b/src/utils/get_failed.py similarity index 100% rename from utils/get_failed.py rename to src/utils/get_failed.py diff --git a/utils/gitlab.groovy b/src/utils/gitlab.groovy similarity index 100% rename from utils/gitlab.groovy rename to src/utils/gitlab.groovy diff --git a/utils/jenkins-build-job.sh b/src/utils/jenkins-build-job.sh similarity index 100% rename from utils/jenkins-build-job.sh rename to src/utils/jenkins-build-job.sh diff --git a/utils/jenkins-vis.groovy b/src/utils/jenkins-vis.groovy similarity index 100% rename from utils/jenkins-vis.groovy rename to src/utils/jenkins-vis.groovy diff --git a/utils/merge.sh b/src/utils/merge.sh similarity index 100% rename from utils/merge.sh rename to src/utils/merge.sh diff --git a/utils/tag.sh b/src/utils/tag.sh similarity index 100% rename from utils/tag.sh rename to src/utils/tag.sh diff --git a/utils/utils.py b/src/utils/utils.py similarity index 100% rename from utils/utils.py rename to src/utils/utils.py diff --git a/utils/weekly_tagger.sh b/src/utils/weekly_tagger.sh similarity index 100% rename from utils/weekly_tagger.sh rename to src/utils/weekly_tagger.sh diff --git a/utils/west-init-update.sh b/src/utils/west-init-update.sh similarity index 100% rename from utils/west-init-update.sh rename to src/utils/west-init-update.sh diff --git a/utils/west.groovy b/src/utils/west.groovy similarity index 100% rename from utils/west.groovy rename to src/utils/west.groovy diff --git a/src/zabbix/zabbix_server_installer.sh b/src/zabbix/zabbix_server_installer.sh new file mode 100644 index 00000000000000..1c5072d5a457df --- /dev/null +++ b/src/zabbix/zabbix_server_installer.sh @@ -0,0 +1,54 @@ +# zabbix UI service installer + +sudo apt-get update +sudo apt-get upgrade +sudo apt-get install docker.io +sudo systemctl enable docker.service +sudo systemctl start docker.service +sudo nano /etc/group + +# some assembly required 
here... +# this was written prior to the 2020 introduction of docker.hub rate-throttling +# now, pulling from inside intel won't work & zabbix is absent from the caas.intel mirror +# so you need to manually download the .tars from docker.hub & copy them into PWD before running this script +# --OR-- +# log in to docker.hub with even a free account to avoid the rate-throttling +# +#if have docker.hub login +# sudo docker pull zabbix/zabbix-server-pgsql +# +#else using manually downloaded tars +gunzip zabbix-* +sudo docker load -i zabbix-server-pgsql-alpine52.tar +sudo docker load -i zabbix-web-nginx-pgsql-alpine52.tar +sudo docker load -i zabbix-server-snmptraps-alpine52.tar +sudo docker load -i postgres-latest.tar +#end if + + +sudo docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 zabbix-net + +sudo docker run --name postgres-server -t -e POSTGRES_USER="zabbix" -e POSTGRES_PASSWORD="zabbix_pwd" -e POSTGRES_DB="zabbix" \ --network=zabbix-net --restart unless-stopped \ -d postgres:latest + +sudo docker run --name zabbix-snmptraps -t -v /zbx_instance/snmptraps:/var/lib/zabbix/snmptraps:rw \ -v /var/lib/zabbix/mibs:/usr/share/snmp/mibs:ro \ --network=zabbix-net -p 162:1162/udp --restart unless-stopped \ -d zabbix/zabbix-snmptraps:alpine-5.2-latest + +sudo docker run --name zabbix-server-pgsql -t -e DB_SERVER_HOST="postgres-server" \ -e POSTGRES_USER="zabbix" -e POSTGRES_PASSWORD="zabbix_pwd" \ -e POSTGRES_DB="zabbix" -e ZBX_ENABLE_SNMP_TRAPS="true" \ --network=zabbix-net -p 10051:10051 \ --volumes-from zabbix-snmptraps \ --restart unless-stopped \ -d zabbix/zabbix-server-pgsql:alpine-5.2-latest + +sudo docker run --name zabbix-web-nginx-pgsql -t \ -e ZBX_SERVER_HOST="zabbix-server-pgsql" -e DB_SERVER_HOST="postgres-server" \ -e POSTGRES_USER="zabbix" -e POSTGRES_PASSWORD="zabbix_pwd" \ -e POSTGRES_DB="zabbix" --network=zabbix-net -p 443:8443 -p 80:8080 -v /etc/ssl/nginx:/etc/ssl/nginx:ro \ --restart unless-stopped \ -d zabbix/zabbix-web-nginx-pgsql:alpine-5.2-latest +
diff --git a/zephyrCI-block-diagram-WW36-2021.png b/zephyrCI-block-diagram-WW36-2021.png new file mode 100644 index 00000000000000..482480e4f13947 Binary files /dev/null and b/zephyrCI-block-diagram-WW36-2021.png differ
diff --git a/zephyrCI-block-diagrams-WW08-2021.png b/zephyrCI-block-diagrams-WW08-2021.png deleted file mode 100644 index 9427c927bda599..00000000000000 Binary files a/zephyrCI-block-diagrams-WW08-2021.png and /dev/null differ