-
Notifications
You must be signed in to change notification settings - Fork 43
174 lines (162 loc) · 5.99 KB
/
main.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
---
# CI pipeline: test (lint inside a pushed test image) -> build (Hugo site
# per deploy environment) -> deploy (rclone sync to S3).
name: CI
# NOTE: a generic YAML 1.1 parser reads the `on` key as boolean true;
# GitHub's loader handles it, so yamllint `truthy` can be suppressed here.
on: [push, pull_request]
env:
  # Compose file used by every `docker compose` / `docker buildx bake` step.
  COMPOSE_FILE: docker-compose.ci.yml
  # Registry repository the throwaway test image is pushed to.
  TEST_IMAGE_REPO: ghcr.io/18f/api.data.gov
jobs:
  # Build the test image, push it to GHCR, then run the linters inside it.
  test:
    runs-on: ubuntu-latest
    outputs:
      # Tag of the pushed test image; consumed by the "build" job via
      # needs.test.outputs.image-tag.
      image-tag: ${{ steps.image-metadata.outputs.version }}
    steps:
      # checkout@v4 runs on node20; v3's node16 runtime is deprecated.
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Container metadata
        id: image-metadata
        # metadata-action@v5 (node20); inputs are unchanged from v4.
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.TEST_IMAGE_REPO }}
          flavor: |
            prefix=test-image-
      - name: Login to GitHub Container Registry
        # login-action@v3 (node20); inputs are unchanged from v2.
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Docker build
        run: docker buildx bake -f "${{ env.COMPOSE_FILE }}" --push
        env:
          TEST_IMAGE_TAG: ${{ steps.image-metadata.outputs.version }}
      - name: ESLint
        run: docker compose run --rm web yarn run lint --max-warnings 0
        env:
          TEST_IMAGE_TAG: ${{ steps.image-metadata.outputs.version }}
      - name: Prettier Check
        run: docker compose run --rm web yarn run prettier:check
        env:
          TEST_IMAGE_TAG: ${{ steps.image-metadata.outputs.version }}
build:
runs-on: ubuntu-latest
needs: test
strategy:
matrix:
deploy_env: [production, staging]
include:
- deploy_env: production
web_site_root_secret_name: WEB_SITE_ROOT
- deploy_env: staging
web_site_root_secret_name: STAGING_WEB_SITE_ROOT
- deploy_env: production
signup_api_key_secret_name: API_KEY
- deploy_env: staging
signup_api_key_secret_name: STAGING_API_KEY
- deploy_env: production
contact_api_key_secret_name: CONTACT_API_KEY
- deploy_env: staging
contact_api_key_secret_name: STAGING_CONTACT_API_KEY
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Build
- name: Build
env:
TEST_IMAGE_TAG: ${{ needs.test.outputs.image-tag }}
run: |
docker compose run \
-e "HUGO_PARAMS_APIUMBRELLACONTACTAPIKEY=${{ secrets[matrix.contact_api_key_secret_name] }}" \
-e "HUGO_PARAMS_APIUMBRELLASIGNUPAPIKEY=${{ secrets[matrix.signup_api_key_secret_name] }}" \
--rm \
web \
hugo \
--minify \
--baseURL "${{ secrets[matrix.web_site_root_secret_name] }}"
- uses: actions/upload-artifact@v1
with:
name: build-${{ matrix.deploy_env }}
path: ./public
deploy:
if: success() && github.repository_owner == '18F' && github.ref == 'refs/heads/main'
needs: build
runs-on: ubuntu-latest
container:
image: rclone/rclone:1.62.2
strategy:
matrix:
deploy_env: [production, staging]
include:
- deploy_env: production
bucket_name_secret_name: BUCKET_NAME
- deploy_env: staging
bucket_name_secret_name: STAGING_BUCKET_NAME
- deploy_env: production
aws_access_key_id_secret_name: AWS_ACCESS_KEY_ID
- deploy_env: staging
aws_access_key_id_secret_name: STAGING_AWS_ACCESS_KEY_ID
- deploy_env: production
aws_secret_access_key_secret_name: AWS_SECRET_ACCESS_KEY
- deploy_env: staging
aws_secret_access_key_secret_name: STAGING_AWS_SECRET_ACCESS_KEY
- deploy_env: production
aws_default_region_secret_name: AWS_DEFAULT_REGION
- deploy_env: staging
aws_default_region_secret_name: STAGING_AWS_DEFAULT_REGION
steps:
# Deploy to S3 bucket.
- uses: actions/download-artifact@v1
with:
name: build-${{ matrix.deploy_env }}
path: ./public
- name: Deploy
env:
RCLONE_S3_ACCESS_KEY_ID: ${{ secrets[matrix.aws_access_key_id_secret_name] }}
RCLONE_S3_SECRET_ACCESS_KEY: ${{ secrets[matrix.aws_secret_access_key_secret_name] }}
RCLONE_S3_REGION: ${{ secrets[matrix.aws_default_region_secret_name] }}
S3_DEST: ":s3:${{ secrets[matrix.bucket_name_secret_name] }}/"
run: |
# Identify cache-busted assets by the fingerprint in the filename.
cache_busted_assets="/{images,javascripts,stylesheets}/**.{\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w\w}.*"
# Sync all cache-busted assets with long cache-control expirations.
rclone \
copy \
--verbose \
--checksum \
--no-update-modtime \
--s3-no-check-bucket \
--s3-no-head \
--header-upload "Cache-Control: public, max-age=31536000, immutable" \
--include "$cache_busted_assets" \
./public/ \
"$S3_DEST"
# Sync the remaining files, disallowing caching on those.
rclone \
copy \
--verbose \
--checksum \
--no-update-modtime \
--s3-no-check-bucket \
--s3-no-head \
--header-upload "Cache-Control: no-cache, max-age=0, must-revalidate" \
./public/ \
"$S3_DEST"
# Run the sync one more time to delete old files. Keep old asset
# files around, so that if old HTML pages continue to load for a few
# minutes, they can still load older assets.
rclone \
sync \
--verbose \
--checksum \
--no-update-modtime \
--s3-no-check-bucket \
--s3-no-head \
--exclude "$cache_busted_assets" \
./public/ \
"$S3_DEST"