From 65e8a090ed01dac730810e952198ba6503730bd0 Mon Sep 17 00:00:00 2001
From: Wayne Starr
Date: Fri, 6 Sep 2024 16:08:57 -0600
Subject: [PATCH] more feedback and final test enablement

---
 .github/workflows/nightly-testing.yaml | 9 ++-------
 adr/0003-autoscaling-runners.md        | 4 ++--
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/nightly-testing.yaml b/.github/workflows/nightly-testing.yaml
index 31ed9ca6..bfb6c127 100644
--- a/.github/workflows/nightly-testing.yaml
+++ b/.github/workflows/nightly-testing.yaml
@@ -1,14 +1,9 @@
 name: Nightly Testing
 
 on:
-  # TODO: @WSTARR - turn this on when ready
-  # schedule:
-  #   - cron: '0 6 * * *' # Runs at midnight Mountain every day
+  schedule:
+    - cron: '0 6 * * *' # Runs at midnight Mountain every day
   workflow_dispatch:
-  # TODO: @WSTARR - temporary testing!
-  pull_request:
-    branches: [main]
-    types: [milestoned, opened, synchronize]
 
 # Abort prior jobs in the same workflow / PR
 concurrency:
diff --git a/adr/0003-autoscaling-runners.md b/adr/0003-autoscaling-runners.md
index f79faa4a..5f48d2e3 100644
--- a/adr/0003-autoscaling-runners.md
+++ b/adr/0003-autoscaling-runners.md
@@ -48,7 +48,7 @@ In order to enable this there are two potential paths:
 
 - Only works in smaller environments, and likely only makes sense on-prem
 
-4. Create a custom [shell executor](https://docs.gitlab.com/runner/executors/shell.html) wrapper that handles autoscaling.
+4. Create a [custom executor](https://docs.gitlab.com/runner/executors/custom.html) wrapper that handles autoscaling.
 
 **Pros:**
 
@@ -64,4 +64,4 @@ We decided to implement the **instance executor and fleeting plugins** to solve
 
 ## Consequences
 
-Runner start times could be compromised with this solution and this does not support on-prem environments. For the runner start times we can explore the docker autoscaler though we will lose OS access and will need to work through / around that. On-prem environments can fallback to the existing k8s runners and in the future we may look to explore VirtualBox runners as a third option.
+Runner start times could be compromised with this solution and this does not support on-prem environments. For the runner start times we can explore the docker autoscaler though we will lose OS access and will need to work through / around that. On-prem environments can fallback to the existing k8s runners and in the future we may look to explore VirtualBox or custom runners as a third option.