This repository has been archived by the owner on Oct 8, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Makefile
executable file
·198 lines (161 loc) · 8.73 KB
/
Makefile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
# The version of Zarf to use. To keep this repo as portable as possible the Zarf binary will be downloaded and added to
# the build folder.
# renovate: datasource=github-tags depName=defenseunicorns/zarf
ZARF_VERSION := v0.32.2
# The version of the build harness container to use
BUILD_HARNESS_REPO := ghcr.io/defenseunicorns/build-harness/build-harness
# renovate: datasource=docker depName=ghcr.io/defenseunicorns/build-harness/build-harness
BUILD_HARNESS_VERSION := 1.14.8
# renovate: datasource=docker depName=ghcr.io/defenseunicorns/packages/dubbd-k3d extractVersion=^(?<version>\d+\.\d+\.\d+)
DUBBD_K3D_VERSION := 0.17.0
# Figure out which Zarf binary we should use based on the operating system we are on
# NOTE(review): ZARF_BIN is not referenced by any target in this file — the
# recipes below invoke ./zarf from the build dir directly. Kept for
# backward compatibility with anything that overrides it externally.
ZARF_BIN := zarf
# Host OS and machine, used to pick the matching Zarf release asset
# (asset names look like "zarf_<version>_<OS>_<arch>", e.g. zarf_v0.32.2_Linux_amd64).
UNAME_S := $(shell uname -s)
UNAME_M := $(shell uname -m)
# Normalize `uname -m` output to the arch names Zarf releases use.
ifeq ($(UNAME_M),x86_64)
ARCH := amd64
else ifeq ($(UNAME_M),amd64)
ARCH := amd64
else ifeq ($(UNAME_M),arm64)
ARCH := arm64
else ifeq ($(UNAME_M),aarch64)
# Linux on 64-bit ARM reports "aarch64"; Zarf assets call it "arm64".
ARCH := arm64
else
$(error Unsupported architecture: $(UNAME_M))
endif
# Silent mode by default. Run `make VERBOSE=1` to turn off silent mode.
# (.SILENT with no prerequisites suppresses command echoing for every target.)
ifndef VERBOSE
.SILENT:
endif
# Optionally add the "-it" flag for docker run commands if the env var "CI" is not set (meaning we are on a local machine and not in github actions)
# An interactive TTY is not available in CI, and `docker run -it` would fail there.
TTY_ARG :=
ifndef CI
TTY_ARG := -it
endif
# Running bare `make` prints the target list instead of building anything.
.DEFAULT_GOAL := help
# Idiomatic way to force a target to always run, by having it depend on this dummy target
FORCE:
.PHONY: help
help: ## Show a list of all targets
	# Self-documenting help: find every "target: ... ## description" line in the
	# parsed makefiles, strip the prerequisites, and align target/description
	# into columns. Only targets annotated with "##" appear in this listing.
	grep -E '^\S*:.*##.*$$' $(MAKEFILE_LIST) \
	| sed -n 's/^\(.*\): \(.*\)##\(.*\)/\1:\3/p' \
	| column -t -s ":"
########################################################################
# Utility Section
########################################################################
.PHONY: docker-save-build-harness
docker-save-build-harness: ## Pulls the build harness docker image and saves it to a tarball
	# Cache the image under .cache/docker so it can be restored later (e.g. as a
	# CI cache artifact) without re-pulling from the registry.
	mkdir -p .cache/docker
	docker pull $(BUILD_HARNESS_REPO):$(BUILD_HARNESS_VERSION)
	docker save -o .cache/docker/build-harness.tar $(BUILD_HARNESS_REPO):$(BUILD_HARNESS_VERSION)
.PHONY: docker-load-build-harness
docker-load-build-harness: ## Loads the saved build harness docker image
	# Counterpart to docker-save-build-harness: restore the image from the
	# tarball produced there. Fails if the tarball does not exist yet.
	docker load -i .cache/docker/build-harness.tar
.PHONY: run-pre-commit-hooks
run-pre-commit-hooks: ## Run all pre-commit hooks. Returns nonzero exit code if any hooks fail. Uses Docker for maximum compatibility
	# Hook environments are cached in .cache/pre-commit (mounted into the
	# container via PRE_COMMIT_HOME) so repeat runs are fast.
	mkdir -p .cache/pre-commit
	# safe.directory is needed because the repo is bind-mounted and owned by a
	# different UID than the container user; asdf installs the tool versions the
	# hooks expect before running them against all files (-a).
	docker run --rm -v "${PWD}:/app" --workdir "/app" -e "PRE_COMMIT_HOME=/app/.cache/pre-commit" $(BUILD_HARNESS_REPO):$(BUILD_HARNESS_VERSION) bash -c 'git config --global --add safe.directory /app && asdf install && pre-commit run -a'
.PHONY: fix-cache-permissions
fix-cache-permissions: ## Fixes the permissions on the pre-commit cache
	# The container writes .cache as root; make it readable/traversable for the
	# host user so later local runs and cache uploads don't hit EACCES.
	docker run --rm -v "${PWD}:/app" --workdir "/app" -e "PRE_COMMIT_HOME=/app/.cache/pre-commit" $(BUILD_HARNESS_REPO):$(BUILD_HARNESS_VERSION) chmod -R a+rx .cache
########################################################################
# Test Section
########################################################################
.PHONY: test
test: ## Run all automated tests. Requires access to an AWS account. Costs money. Requires env vars "REPO_URL", "GIT_BRANCH", "REGISTRY1_USERNAME", "REGISTRY1_PASSWORD" and standard AWS env vars.
	# Persist Go module and build caches on the host so repeated test runs do
	# not re-download/rebuild everything inside the ephemeral container.
	mkdir -p .cache/go
	mkdir -p .cache/go-build
	echo "Running automated tests. This will take several minutes. At times it does not log anything to the console. If you interrupt the test run you will need to log into AWS console and manually delete any orphaned infrastructure."
	# Bare `-e VAR` flags pass the named variables through from the host
	# environment; the AWS_* set covers both static keys and STS session
	# credentials, and SKIP_SETUP/SKIP_TEST/SKIP_TEARDOWN let a developer rerun
	# individual phases against still-standing infrastructure.
	# `-p 1` serializes test packages because they share real AWS infra;
	# the 2h timeout covers full provision/teardown cycles.
	docker run $(TTY_ARG) --rm \
	-v "${PWD}:/app" \
	-v "${PWD}/.cache/go:/root/go" \
	-v "${PWD}/.cache/go-build:/root/.cache/go-build" \
	--workdir "/app/test/e2e" \
	-e GOPATH=/root/go \
	-e GOCACHE=/root/.cache/go-build \
	-e REPO_URL \
	-e GIT_BRANCH \
	-e REGISTRY1_USERNAME \
	-e REGISTRY1_PASSWORD \
	-e AWS_REGION \
	-e AWS_DEFAULT_REGION \
	-e AWS_ACCESS_KEY_ID \
	-e AWS_SECRET_ACCESS_KEY \
	-e AWS_SESSION_TOKEN \
	-e AWS_SECURITY_TOKEN \
	-e AWS_SESSION_EXPIRATION \
	-e SKIP_SETUP -e SKIP_TEST \
	-e SKIP_TEARDOWN \
	-e AWS_AVAILABILITY_ZONE \
	$(BUILD_HARNESS_REPO):$(BUILD_HARNESS_VERSION) \
	bash -c 'asdf install && go test -v -timeout 2h -p 1 ./...'
.PHONY: test-ssh
test-ssh: ## Run this if you set SKIP_TEARDOWN=1 and want to SSH into the still-running test server. Don't forget to unset SKIP_TEARDOWN when you're done
	# Each line runs in its own shell, so every step re-enters the terraform dir.
	cd test/tf/public-ec2-instance && terraform init
	# Extract the private key that the test run wrote to Ec2KeyPair.json
	# (assumes a prior `make test` with SKIP_TEARDOWN=1 left .test-data behind)
	# and tighten permissions so ssh will accept it.
	cd test/tf/public-ec2-instance/.test-data && cat Ec2KeyPair.json | jq -r .PrivateKey > privatekey.pem && chmod 600 privatekey.pem
	# $$(...) escapes the shell substitution from make; tr strips the quotes
	# terraform puts around the output value.
	cd test/tf/public-ec2-instance && ssh -i .test-data/privatekey.pem ubuntu@$$(terraform output public_instance_ip | tr -d '"')
########################################################################
# Cluster Section
########################################################################
# These are commands, not files: declare them phony so a stray file or
# directory named e.g. "cluster/create" can never shadow them.
.PHONY: cluster/reset cluster/create cluster/destroy
cluster/reset: cluster/destroy cluster/create ## This will destroy any existing cluster and then create a new one

cluster/create: ## Create a k3d cluster with metallb installed
	K3D_FIX_MOUNTS=1 k3d cluster create k3d-test-cluster --config utils/k3d/k3d-config.yaml
	k3d kubeconfig merge k3d-test-cluster -o /home/${USER}/cluster-kubeconfig.yaml
	echo "Installing Calico..."
	# NOTE(review): `2>&1 >/dev/null` discards stdout but still shows stderr on
	# the terminal (it is NOT the same as `>/dev/null 2>&1`) — presumably
	# intentional so errors remain visible; confirm before changing.
	kubectl apply --wait=true -f utils/calico/calico.yaml 2>&1 >/dev/null
	echo "Waiting for Calico to be ready..."
	kubectl rollout status deployment/calico-kube-controllers -n kube-system --watch --timeout=90s 2>&1 >/dev/null
	kubectl rollout status daemonset/calico-node -n kube-system --watch --timeout=90s 2>&1 >/dev/null
	kubectl wait --for=condition=Ready pods --all --all-namespaces --timeout=90s 2>&1 >/dev/null
	echo
	utils/metallb/install.sh
	echo "Cluster is ready!"

cluster/destroy: ## Destroy the k3d cluster
	k3d cluster delete k3d-test-cluster
########################################################################
# Build Section
########################################################################
# Aggregate target: build dir, zarf binary, init package, DUBBD, test deps,
# and the confluence capability package itself.
.PHONY: build/all
build/all: build build/zarf build/zarf-init build/dubbd-k3d build/test-pkg-deps build/uds-capability-confluence ## Build everything needed to deploy the confluence capability

build: ## Create build directory
	mkdir -p build

.PHONY: clean
clean: ## Clean up build files
	rm -rf ./build
.PHONY: build/zarf
build/zarf: | build ## Download the Zarf to the build dir
	# Declared .PHONY even though it produces build/zarf, so the recipe always
	# runs; the version check below makes the download itself idempotent.
	# NOTE(review): assumes `zarf version` prints exactly "$(ZARF_VERSION)" — a
	# mismatched or partial binary triggers a re-download.
	if [ -f build/zarf ] && [ "$$(build/zarf version)" = "$(ZARF_VERSION)" ] ; then exit 0; fi && \
	echo "Downloading zarf" && \
	curl -sL https://github.com/defenseunicorns/zarf/releases/download/$(ZARF_VERSION)/zarf_$(ZARF_VERSION)_$(UNAME_S)_$(ARCH) -o build/zarf && \
	chmod +x build/zarf
.PHONY: build/zarf-init
build/zarf-init: | build ## Download the init package
	# Skip the download if the versioned tarball is already present.
	# NOTE(review): the init package is hard-coded to amd64, unlike the zarf
	# binary above which follows $(ARCH) — confirm whether arm64 hosts need
	# their own init package.
	if [ -f build/zarf-init-amd64-$(ZARF_VERSION).tar.zst ] ; then exit 0; fi && \
	echo "Downloading zarf-init-amd64-$(ZARF_VERSION).tar.zst" && \
	curl -sL https://github.com/defenseunicorns/zarf/releases/download/$(ZARF_VERSION)/zarf-init-amd64-$(ZARF_VERSION).tar.zst -o build/zarf-init-amd64-$(ZARF_VERSION).tar.zst
.PHONY: build/dubbd-k3d
build/dubbd-k3d: | build/zarf ## Download dubbd k3d oci package
	# Order-only dependency on build/zarf guarantees ./zarf exists before use.
	# Skip the pull if the versioned package tarball is already present.
	if [ -f build/zarf-package-dubbd-k3d-amd64-$(DUBBD_K3D_VERSION).tar.zst ] ; then exit 0; fi && \
	cd build && ./zarf package pull oci://ghcr.io/defenseunicorns/packages/dubbd-k3d:$(DUBBD_K3D_VERSION) -a amd64 --oci-concurrency 12
# Phony like its sibling build targets: it produces versioned tarballs, not a
# file named "build/test-pkg-deps", so a stray file must never satisfy it.
.PHONY: build/test-pkg-deps
build/test-pkg-deps: | build/zarf ## Build package dependencies for testing
	# Each line gets its own shell, so each re-enters the build dir before
	# invoking the locally downloaded ./zarf.
	cd build && ./zarf package create ../utils/pkg-deps/namespaces/ --skip-sbom --confirm
	cd build && ./zarf package create ../utils/pkg-deps/confluence/postgres/ --skip-sbom --confirm
.PHONY: build/uds-capability-confluence
# Depends on build/zarf (not just the build dir) because the recipe runs
# ./zarf — with only `| build`, a fresh checkout would fail here.
build/uds-capability-confluence: | build/zarf ## Build the confluence capability
	cd build && ./zarf package create ../ --skip-sbom --confirm
########################################################################
# Deploy Section
########################################################################
# Deploy targets are commands, not files — declare them all phony.
.PHONY: deploy/all deploy/init deploy/dubbd-k3d deploy/test-pkg-deps deploy/uds-capability-confluence
deploy/all: deploy/init deploy/dubbd-k3d deploy/test-pkg-deps deploy/uds-capability-confluence ## Deploy init, DUBBD, test dependencies, and the confluence capability

deploy/init: | build/zarf ## Deploy the zarf init package
	cd build && ./zarf init --confirm --components=git-server

deploy/dubbd-k3d: | build/zarf ## Deploy the k3d flavor of DUBBD
	cd build && ./zarf package deploy zarf-package-dubbd-k3d-amd64-$(DUBBD_K3D_VERSION).tar.zst --confirm

deploy/test-pkg-deps: | build/zarf ## Deploy the package dependencies needed for testing the confluence capability
	# Globs match the versioned tarballs produced by build/test-pkg-deps.
	cd build && ./zarf package deploy zarf-package-confluence-namespaces-* --confirm
	cd build && ./zarf package deploy zarf-package-confluence-postgres* --confirm

# Depends on build/zarf like its siblings: the recipe runs ./zarf, which the
# original rule did not guarantee to exist.
deploy/uds-capability-confluence: | build/zarf ## Deploy the confluence capability
	cd build && ./zarf package deploy zarf-package-confluence-amd64-*.tar.zst --confirm
########################################################################
# Macro Section
########################################################################
.PHONY: all
# NOTE(review): correctness relies on prerequisites running left-to-right,
# which plain `make` does but `make -j` does not — run these macros serially.
all: build/all cluster/reset deploy/all ## Build and deploy confluence locally

.PHONY: rebuild
rebuild: clean build/all ## Clean the build dir and rebuild all packages