-
-
Notifications
You must be signed in to change notification settings - Fork 477
343 lines (300 loc) · 10.8 KB
/
benchmark.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
# Benchmarks are sharded.
#
# Each benchmark (parser, transformer, etc) runs in parallel in a separate job.
# Linter benchmarks are much slower to build and run than the rest, so linter benchmark
# is built in 1 job, and then run on each fixture in parallel in separate jobs.
# When all jobs are complete, a final job uploads all the results to CodSpeed.
#
# Sharding is not natively supported by CodSpeed, so we use a hacky method to achieve it.
# 1. Intercept the data which `CodSpeedHQ/action` would normally upload to CodSpeed for each job.
# 2. Once all runs are complete, combine the data for all the runs together.
# 3. Upload the combined data to CodSpeed as one.
# This is performed by some short NodeJS scripts in `tasks/benchmark/codspeed`.
name: Benchmark

on:
  # Allow triggering manually from the Actions tab.
  workflow_dispatch:
  pull_request:
    types: [opened, synchronize]
    # Only run when files which can affect benchmark results change.
    paths:
      - '**/*.rs'
      - 'napi/parser/**/*.js'
      - 'napi/parser/**/*.mjs'
      - 'Cargo.lock'
      - 'rust-toolchain.toml'
      - '.github/workflows/benchmark.yml'
      - 'tasks/benchmark/codspeed/*.mjs'
  push:
    branches:
      - main
    paths:
      - '**/*.rs'
      - 'napi/parser/**/*.js'
      - 'napi/parser/**/*.mjs'
      - 'Cargo.lock'
      - 'rust-toolchain.toml'
      - '.github/workflows/benchmark.yml'
      - 'tasks/benchmark/codspeed/*.mjs'

# Cancel any in-flight run for the same PR (or for the same commit on `main`)
# when a newer run starts.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  # Build and run benchmarks for all components except linter
  benchmark:
    name: Benchmark
    runs-on: ubuntu-latest
    strategy:
      fail-fast: true
      matrix:
        # One job per component; each builds only its own bench binary.
        component:
          - lexer
          - parser
          - transformer
          - isolated_declarations
          - semantic
          - minifier
          - codegen
          - sourcemap
    steps:
      - name: Checkout Branch
        uses: taiki-e/checkout-action@v1
      - uses: Boshen/setup-rust@main
        with:
          cache-key: benchmark-${{ matrix.component }}
          # Only write the cache back on `main`, not on PR runs.
          save-cache: ${{ github.ref_name == 'main' }}
          tools: cargo-codspeed
      - name: Install pnpm
        uses: pnpm/action-setup@v4.0.0
        with:
          package_json_file: ./tasks/benchmark/codspeed/package.json
      - name: Install Node.js
        uses: actions/setup-node@v4
        with:
          node-version-file: .node-version
          cache-dependency-path: ./tasks/benchmark/codspeed/package.json
          cache: pnpm
      # Run the interceptor in the background (`&`) so it captures the data
      # which `CodSpeedHQ/action` uploads in the "Run benchmark" step below.
      - name: Start bench results interceptor server
        working-directory: ./tasks/benchmark/codspeed
        env:
          COMPONENT: ${{ matrix.component }}
        run: |
          pnpm install
          pnpm run start &
      - name: Build benchmark
        env:
          RUSTFLAGS: "-C debuginfo=1 -C strip=none -g --cfg codspeed"
        run: |
          cargo build --release -p oxc_benchmark --bench ${{ matrix.component }} \
            --no-default-features --features ${{ matrix.component }} --features codspeed
          mkdir -p target/codspeed/oxc_benchmark
          mv target/release/deps/${{ matrix.component }}-* target/codspeed/oxc_benchmark
          rm target/codspeed/oxc_benchmark/*.d
      - name: Run benchmark
        uses: CodSpeedHQ/action@v3
        timeout-minutes: 30
        with:
          # Dummy token for tokenless runs, to suppress logging hash of metadata JSON (see `upload.mjs`)
          token: ${{ secrets.CODSPEED_TOKEN || 'dummy' }}
          # Redirect the upload to the local interceptor started above.
          upload-url: http://localhost:${{ env.INTERCEPT_PORT }}/upload
          run: cargo codspeed run
      - name: Upload bench data artefact
        uses: actions/upload-artifact@v4
        with:
          name: result-${{ matrix.component }}
          path: ${{ env.DATA_DIR }} # env.DATA_DIR from `capture.mjs`
          if-no-files-found: error
          retention-days: 1

  # Build linter benchmark.
  # Linter benchmarks are much slower than the rest, so we run each fixture in a separate job.
  # But only build the linter benchmark once.
  build-linter:
    name: Build Linter Benchmark
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Branch
        uses: taiki-e/checkout-action@v1
      - uses: Boshen/setup-rust@main
        with:
          cache-key: benchmark-linter
          save-cache: ${{ github.ref_name == 'main' }}
      - uses: mozilla-actions/sccache-action@v0.0.5
        if: ${{ vars.USE_SCCACHE == 'true' }}
      - name: Build benchmark
        env:
          RUSTFLAGS: "-C debuginfo=1 -C strip=none -g --cfg codspeed"
        run: |
          cargo build --release -p oxc_benchmark --bench linter \
            --no-default-features --features linter --features codspeed
          mkdir -p target/codspeed/oxc_benchmark
          mv target/release/deps/linter-* target/codspeed/oxc_benchmark
          rm target/codspeed/oxc_benchmark/*.d
      - name: Upload Binary
        uses: actions/upload-artifact@v4
        with:
          if-no-files-found: error
          name: benchmark-linter
          path: ./target/codspeed/oxc_benchmark
          retention-days: 1

  # Run linter benchmarks. Each fixture in a separate job.
  benchmark-linter:
    name: Benchmark linter
    needs: build-linter
    runs-on: ubuntu-latest
    strategy:
      fail-fast: true
      matrix:
        # Fixture indices; the bench binary selects the fixture via the
        # `FIXTURE` env var below.
        fixture:
          - 0
          - 1
    steps:
      - name: Checkout Branch
        uses: taiki-e/checkout-action@v1
      - name: Download Binary
        uses: actions/download-artifact@v4
        with:
          name: benchmark-linter
          path: ./target/codspeed/oxc_benchmark
      # `actions/download-artifact` does not preserve the execute bit.
      - name: Fix permission loss
        run: |
          ls ./target/codspeed/oxc_benchmark
          chmod +x ./target/codspeed/oxc_benchmark/*
      - name: Install codspeed
        uses: taiki-e/install-action@v2
        with:
          tool: cargo-codspeed
      - name: Install Node.js
        uses: actions/setup-node@v4
        with:
          node-version-file: .node-version
          registry-url: 'https://registry.npmjs.org'
      - name: Start bench results interceptor server
        working-directory: ./tasks/benchmark/codspeed
        env:
          COMPONENT: linter
          FIXTURE: ${{ matrix.fixture }}
        run: |
          corepack enable
          pnpm install
          node capture.mjs &
      - name: Run benchmark
        uses: CodSpeedHQ/action@v3
        timeout-minutes: 30
        env:
          FIXTURE: ${{ matrix.fixture }}
        with:
          # Dummy token for tokenless runs, to suppress logging hash of metadata JSON (see `upload.mjs`)
          token: ${{ secrets.CODSPEED_TOKEN || 'dummy' }}
          upload-url: http://localhost:${{ env.INTERCEPT_PORT }}/upload
          run: cargo codspeed run
      - name: Upload bench data artefact
        uses: actions/upload-artifact@v4
        with:
          name: result-linter${{ matrix.fixture }}
          path: ${{ env.DATA_DIR }} # env.DATA_DIR from `capture.mjs`
          if-no-files-found: error
          retention-days: 1

  # benchmark-napi:
  #   name: Benchmark NAPI parser
  #   runs-on: ubuntu-latest
  #   if: false
  #   steps:
  #     - name: Checkout Branch
  #       uses: taiki-e/checkout-action@v1
  #     - uses: Boshen/setup-rust@main
  #       with:
  #         shared-key: benchmark_napi
  #         save-cache: ${{ github.ref_name == 'main' }}
  #     - name: Install codspeed
  #       uses: taiki-e/install-action@v2
  #       with:
  #         tool: cargo-codspeed
  #     - name: Install Node.js
  #       uses: actions/setup-node@v4
  #       with:
  #         node-version-file: .node-version
  #         registry-url: 'https://registry.npmjs.org'
  #     - name: Start bench results interceptor server
  #       working-directory: ./tasks/benchmark/codspeed
  #       env:
  #         COMPONENT: 'parser_napi'
  #       run: |
  #         corepack enable
  #         pnpm install
  #         node capture.mjs &
  #     # CodSpeed gets measurements completely off for NAPI if run in `CodSpeedHQ/action`,
  #     # so instead run real benchmark without CodSpeed's instrumentation and save the results.
  #     # Then "Run Rust benchmark" step below runs a loop of some simple Rust code the number
  #     # of times required to take same amount of time as the real benchmark took.
  #     # This is all a workaround for https://github.com/CodSpeedHQ/action/issues/96
  #     - name: Build NAPI Benchmark
  #       working-directory: ./napi/parser
  #       run: |
  #         corepack enable
  #         pnpm install
  #         pnpm build
  #     - name: Run NAPI Benchmark
  #       working-directory: ./napi/parser
  #       run: node parse.bench.mjs
  #     - name: Build Rust benchmark
  #       env:
  #         RUSTFLAGS: "-C debuginfo=2 -C strip=none -g --cfg codspeed"
  #       run: |
  #         cargo build --release -p oxc_benchmark --bench parser_napi --no-default-features --features codspeed_napi
  #         mkdir -p target/codspeed/oxc_benchmark/
  #         mv target/release/deps/parser_napi-* target/codspeed/oxc_benchmark
  #         rm -rf target/codspeed/oxc_benchmark/*.d
  #     - name: Run Rust benchmark
  #       uses: CodSpeedHQ/action@v2
  #       timeout-minutes: 30
  #       with:
  #         run: cargo codspeed run
  #         # Dummy token for tokenless runs, to suppress logging hash of metadata JSON (see `upload.mjs`)
  #         token: ${{ secrets.CODSPEED_TOKEN || 'dummy' }}
  #         upload-url: http://localhost:${{ env.INTERCEPT_PORT }}/upload
  #     - name: Upload bench data artefact
  #       uses: actions/upload-artifact@v4
  #       with:
  #         name: 'parser_napi'
  #         path: ${{ env.DATA_DIR }}
  #         if-no-files-found: error
  #         retention-days: 1

  # Upload combined benchmark results to CodSpeed
  upload:
    name: Upload benchmarks
    # needs: [benchmark, benchmark-linter, benchmark-napi]
    needs: [benchmark, benchmark-linter]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Branch
        uses: taiki-e/checkout-action@v1
      - name: Install Node.js
        uses: actions/setup-node@v4
        with:
          node-version-file: .node-version
          registry-url: 'https://registry.npmjs.org'
      - name: Create temp dir
        working-directory: ./tasks/benchmark/codspeed
        run: |
          corepack enable
          pnpm install
          node create_temp_dir.mjs
      # Merge every shard's intercepted data into one directory.
      - name: Download artefacts
        uses: actions/download-artifact@v4
        with:
          merge-multiple: true
          pattern: result-*
          path: ${{ env.DATA_DIR }} # env.DATA_DIR from `create_temp_dir.mjs`
      - name: Upload to Codspeed
        working-directory: ./tasks/benchmark/codspeed
        env:
          CODSPEED_TOKEN: ${{ secrets.CODSPEED_TOKEN }}
        run: node upload.mjs
      # Shard artefacts are only needed within this workflow run; clean them up.
      - name: Delete temporary artefacts
        uses: geekyeggo/delete-artifact@v5
        with:
          name: |
            result-*
            benchmark-linter
          failOnError: false