# _benchmark.yml
# Generated file: !!! DO NOT EDIT !!!
---
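# PYPERFORMANCE_HASH and PYSTON_BENCHMARKS_HASH pin the exact revisions of
# pyperformance and pyston/python-macrobenchmarks checked out by the jobs below.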
env:
PYPERFORMANCE_HASH: 9164273e5504c410a5be08d8753c91be708fdd9a
PYSTON_BENCHMARKS_HASH: 265655e7f03ace13ec1e00e1ba299179e69f8a00
name: _benchmark
on:
workflow_call:
inputs:
fork:
description: Fork of cpython to benchmark
type: string
ref:
description: Branch, tag or (full) SHA commit to benchmark
type: string
machine:
description: Machine to run on
type: string
benchmarks:
description: Benchmarks to run (comma-separated; empty runs all benchmarks)
type: string
pgo:
description: Build with PGO
type: boolean
force:
description: Rerun and replace results if commit already exists
type: boolean
perf:
description: Collect Linux perf profiling data (Linux only)
type: boolean
tier2:
        description: Use the Tier 2 interpreter
type: boolean
default: false
jit:
        description: Use the experimental JIT compiler
type: boolean
default: false
nogil:
        description: Build with free threading (--disable-gil)
type: boolean
default: false
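  # The workflow_dispatch inputs below mirror the workflow_call inputs above,
  # with defaults filled in for manual runs.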
workflow_dispatch:
inputs:
fork:
description: Fork of cpython to benchmark
type: string
default: python
ref:
description: Branch, tag or (full) SHA commit to benchmark
type: string
default: main
machine:
description: Machine to run on
        default: linux-x86_64-linux  # assumed default; must be one of the options below
type: choice
options:
- linux-x86_64-linux
- linux-x86_64-vultr
- all
benchmarks:
description: Benchmarks to run (comma-separated; empty runs all benchmarks)
type: string
pgo:
description: Build with PGO
type: boolean
force:
description: Rerun and replace results if commit already exists
type: boolean
perf:
description: Collect Linux perf profiling data (Linux only)
type: boolean
tier2:
        description: Use the Tier 2 interpreter
type: boolean
default: false
jit:
        description: Use the experimental JIT compiler
type: boolean
default: false
nogil:
        description: Build with free threading (--disable-gil)
type: boolean
default: false
jobs:
benchmark-linux-x86_64-linux:
runs-on: [self-hosted, linux, bare-metal, linux-x86_64-linux]
timeout-minutes: 1440
steps:
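      # BENCHMARK_MACHINE_NICKNAME labels results for this runner; it is assumed
      # to be picked up later by the bench_runner tooling.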
- name: Setup environment
run: |-
echo "BENCHMARK_MACHINE_NICKNAME=linux" >> $GITHUB_ENV
- name: Checkout benchmarking
uses: actions/checkout@v4
- name: git gc
run: |
git gc
- uses: fregante/setup-git-user@v2
- name: Setup system Python
if: ${{ runner.arch == 'X64' }}
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Checkout CPython
uses: actions/checkout@v4
with:
persist-credentials: false
repository: ${{ inputs.fork }}/cpython
path: cpython
ref: ${{ inputs.ref }}
fetch-depth: 50
- name: Install dependencies from PyPI
run: |
rm -rf venv
python -m venv venv
venv/bin/python -m pip install --upgrade pip
venv/bin/python -m pip install -r requirements.txt
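      # bench_runner's should_run command checks whether results already exist for
      # this fork/ref/machine/flags combination; it appends should_run=<true|false>
      # to $GITHUB_OUTPUT, and every remaining step is gated on that value.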
- name: Should we run?
if: ${{ always() }}
id: should_run
run: |
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} false ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
- name: Checkout python-macrobenchmarks
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
with:
persist-credentials: false
repository: pyston/python-macrobenchmarks
path: pyston-benchmarks
ref: ${{ env.PYSTON_BENCHMARKS_HASH }}
- name: Checkout pyperformance
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
with:
persist-credentials: false
repository: mdboom/pyperformance
path: pyperformance
ref: ${{ env.PYPERFORMANCE_HASH }}
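      # Configure flags follow the workflow inputs: pgo -> --enable-optimizations
      # --with-lto=yes, tier2 -> --enable-experimental-jit=interpreter,
      # jit -> --enable-experimental-jit=yes, nogil -> --disable-gil.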
- name: Build Python
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
cd cpython
./configure ${{ inputs.pgo == true && '--enable-optimizations --with-lto=yes' || '' }} ${{ inputs.tier2 == true && '--enable-experimental-jit=interpreter' || '' }} ${{ inputs.jit == true && '--enable-experimental-jit=yes' || '' }} ${{ inputs.nogil == true && '--disable-gil' || '' }}
make ${{ runner.arch == 'ARM64' && '-j' || '-j4' }}
- name: Install pyperformance
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
venv/bin/python -m pip install ./pyperformance
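      # pyperf's "system tune" command adjusts the OS for stable benchmarking
      # (CPU frequency scaling, turbo boost and related settings); LD_LIBRARY_PATH
      # is forwarded explicitly because sudo normally resets the environment.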
- name: Tune system
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
sudo LD_LIBRARY_PATH=$LD_LIBRARY_PATH venv/bin/python -m pyperf system tune
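      # Raise the kernel ceiling so Linux perf may sample at up to 100,000 Hz
      # during profiling runs.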
- name: Tune for (Linux) perf
if: ${{ steps.should_run.outputs.should_run != 'false' && inputs.perf }}
run: |
sudo bash -c "echo 100000 > /proc/sys/kernel/perf_event_max_sample_rate"
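      # run_benchmarks drives pyperformance against the freshly built cpython/python
      # binary; "perf" mode collects profiling data, "benchmark" mode produces
      # benchmark.json. ~/.debug (perf's build-id cache) is cleared beforehand.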
- name: Running pyperformance
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
rm -rf ~/.debug/*
venv/bin/python -m bench_runner run_benchmarks ${{ inputs.perf && 'perf' || 'benchmark' }} cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} --run_id ${{ github.run_id }}
# Pull again, since another job may have committed results in the meantime
- name: Pull benchmarking
if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
run: |
# Another benchmarking task may have created results for the same
# commit while the above was running. This "magic" incantation means
# that any local results for this commit will override anything we
# just pulled in in that case.
git pull -s recursive -X ours --autostash --rebase
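      # EndBug/add-and-commit commits anything new under results/ back to the
      # benchmarking repository checked out above.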
- name: Adding data to repo
if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
uses: EndBug/add-and-commit@v9
with:
add: results
- name: Upload benchmark artifacts
if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
uses: actions/upload-artifact@v4
with:
name: benchmark
path: |
benchmark.json
overwrite: true
- name: Upload perf artifacts
if: ${{ steps.should_run.outputs.should_run != 'false' && inputs.perf }}
uses: actions/upload-artifact@v4
with:
name: perf
path: |
profiling/results
if: ${{ (inputs.machine == 'linux-x86_64-linux' || inputs.machine == 'all') }}
env:
flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true &&
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
benchmark-linux-x86_64-vultr:
runs-on: [self-hosted, linux, bare-metal, linux-x86_64-vultr]
timeout-minutes: 1440
steps:
- name: Setup environment
run: |-
echo "BENCHMARK_MACHINE_NICKNAME=vultr" >> $GITHUB_ENV
- name: Checkout benchmarking
uses: actions/checkout@v4
- name: git gc
run: |
git gc
- uses: fregante/setup-git-user@v2
- name: Setup system Python
if: ${{ runner.arch == 'X64' }}
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Checkout CPython
uses: actions/checkout@v4
with:
persist-credentials: false
repository: ${{ inputs.fork }}/cpython
path: cpython
ref: ${{ inputs.ref }}
fetch-depth: 50
- name: Install dependencies from PyPI
run: |
rm -rf venv
python -m venv venv
venv/bin/python -m pip install --upgrade pip
venv/bin/python -m pip install -r requirements.txt
- name: Should we run?
if: ${{ always() }}
id: should_run
run: |
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} false ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
- name: Checkout python-macrobenchmarks
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
with:
persist-credentials: false
repository: pyston/python-macrobenchmarks
path: pyston-benchmarks
ref: ${{ env.PYSTON_BENCHMARKS_HASH }}
- name: Checkout pyperformance
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
with:
persist-credentials: false
repository: mdboom/pyperformance
path: pyperformance
ref: ${{ env.PYPERFORMANCE_HASH }}
- name: Build Python
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
cd cpython
./configure ${{ inputs.pgo == true && '--enable-optimizations --with-lto=yes' || '' }} ${{ inputs.tier2 == true && '--enable-experimental-jit=interpreter' || '' }} ${{ inputs.jit == true && '--enable-experimental-jit=yes' || '' }} ${{ inputs.nogil == true && '--disable-gil' || '' }}
make ${{ runner.arch == 'ARM64' && '-j' || '-j4' }}
- name: Install pyperformance
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
venv/bin/python -m pip install ./pyperformance
- name: Tune system
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
sudo LD_LIBRARY_PATH=$LD_LIBRARY_PATH venv/bin/python -m pyperf system tune
- name: Tune for (Linux) perf
if: ${{ steps.should_run.outputs.should_run != 'false' && inputs.perf }}
run: |
sudo bash -c "echo 100000 > /proc/sys/kernel/perf_event_max_sample_rate"
- name: Running pyperformance
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
rm -rf ~/.debug/*
venv/bin/python -m bench_runner run_benchmarks ${{ inputs.perf && 'perf' || 'benchmark' }} cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} --run_id ${{ github.run_id }}
# Pull again, since another job may have committed results in the meantime
- name: Pull benchmarking
if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
run: |
# Another benchmarking task may have created results for the same
# commit while the above was running. This "magic" incantation means
# that any local results for this commit will override anything we
# just pulled in in that case.
git pull -s recursive -X ours --autostash --rebase
- name: Adding data to repo
if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
uses: EndBug/add-and-commit@v9
with:
add: results
- name: Upload benchmark artifacts
if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
uses: actions/upload-artifact@v4
with:
name: benchmark
path: |
benchmark.json
overwrite: true
- name: Upload perf artifacts
if: ${{ steps.should_run.outputs.should_run != 'false' && inputs.perf }}
uses: actions/upload-artifact@v4
with:
name: perf
path: |
profiling/results
if: ${{ (inputs.machine == 'linux-x86_64-vultr' || inputs.machine == 'all') }}
env:
flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true &&
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
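# The two benchmark-* jobs are identical except for the runner labels, the machine
# nickname, and the machine value tested in the job-level "if:"; the comma-separated
# "flags" env mirrors the tier2/jit/nogil inputs.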