From fe290423d6bc1fe00f786d4a98aed1fd91b5b5af Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20R=C3=BCger?=
Date: Thu, 8 Feb 2024 23:54:15 +0100
Subject: [PATCH] Add benchmark tooling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds a way to compare benchmarks, as already used in
kube-state-metrics, hopefully allowing for better comparisons when
applying changes.

Co-authored-by: Manuel Rüger
Signed-off-by: Ivan Valdes
---
 .github/workflows/benchmark-pr.yaml | 61 ++++++++++++++++++++++++++++
 Makefile                            | 11 +++++
 scripts/compare_benchmarks.sh       | 63 +++++++++++++++++++++++++++++
 3 files changed, 135 insertions(+)
 create mode 100644 .github/workflows/benchmark-pr.yaml
 create mode 100755 scripts/compare_benchmarks.sh
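For local iteration, the same comparison the CI job runs can be started by hand. The sketch below is only a usage illustration: it reuses the Makefile targets (install-benchstat, test-benchmark-compare), the REF variable, and the BENCHSTAT_FORMAT / BENCHSTAT_OUTPUT_FILE environment variables exactly as the workflow below invokes them; passing the current HEAD as REF is just an example value, not something the patch prescribes.

    # Install benchstat, then benchmark the given ref against the base ref that
    # scripts/compare_benchmarks.sh selects, writing a CSV report to result.csv.
    make install-benchstat
    BENCHSTAT_FORMAT=csv BENCHSTAT_OUTPUT_FILE=result.csv \
      make test-benchmark-compare REF="$(git rev-parse HEAD)"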
diff --git a/.github/workflows/benchmark-pr.yaml b/.github/workflows/benchmark-pr.yaml
new file mode 100644
index 000000000..ffa5ba080
--- /dev/null
+++ b/.github/workflows/benchmark-pr.yaml
@@ -0,0 +1,61 @@
+---
+name: Benchmarks on AMD64
+permissions: read-all
+on: [pull_request]
+jobs:
+  benchmark-pull-request:
+    runs-on: ubuntu-latest-8-cores
+    steps:
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+        with:
+          fetch-depth: 0
+      - id: goversion
+        run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
+      - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+        with:
+          go-version: ${{ steps.goversion.outputs.goversion }}
+      - name: Install Benchstat
+        run: make install-benchstat
+      - name: Run Benchmarks
+        run: |
+          BENCHSTAT_FORMAT=csv BENCHSTAT_OUTPUT_FILE=result.csv make test-benchmark-compare REF=${{ github.event.pull_request.head.sha }}
+          echo "$(head -n1 result.csv),,," > output.csv # Add three missing cols from header
+          tail -n+2 result.csv >> output.csv
+      - name: Read CSV
+        id: read-csv
+        uses: juliangruber/read-file-action@b549046febe0fe86f8cb4f93c24e284433f9ab58 # v1.1.7
+        with:
+          path: ./output.csv
+      - name: Create Markdown Table
+        uses: petems/csv-to-md-table-action@401501a2cdf2512164c1be3b70411976a2b838b9 # v4.0.0
+        id: csv-table-output
+        with:
+          csvinput: ${{ steps.read-csv.outputs.content }}
+      - run: |
+          echo "${{ steps.csv-table-output.outputs.markdown-table }}" >> "$GITHUB_STEP_SUMMARY"
+          cat <<EOL >> "$GITHUB_STEP_SUMMARY"
+
+          This section contains three tables generated by benchstat:
+
+          1. Seconds per operation.
+          2. Bytes per operation.
+          3. Allocations per operation.
+
+          The tables show the median and 75% confidence interval (CI) summaries for each benchmark, comparing the HEAD and the BASE of the pull request, and an A/B comparison under "vs base". The last column shows the statistical p-value with three runs (n=3).
+
+          The last row has the geometric mean (geomean) for the given rows in the table.
+
+          Refer to [benchstat's documentation](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) for more help.
+          EOL
+      - name: Validate results under acceptable limit
+        run: |
+          export MAX_ACCEPTABLE_DIFFERENCE=5
+          while IFS= read -r line; do
+            if [[ "$line" = +* ]] || [[ "$line" = -* ]]; then
+              if (( $(echo "${line//[^0-9.]/}"'>'"$MAX_ACCEPTABLE_DIFFERENCE" | bc -l) )); then
+                echo "::error::$line is above the maximum acceptable difference ($MAX_ACCEPTABLE_DIFFERENCE)"
+                exit 1
+              fi
+            fi
+          done
diff --git a/scripts/compare_benchmarks.sh b/scripts/compare_benchmarks.sh
new file mode 100755
+grep ^Benchmark "${RESULT_CURRENT}" > "${RESULT_CURRENT}".tmp && mv "${RESULT_CURRENT}".tmp "${RESULT_CURRENT}"
+
+echo ""
+echo "### Done testing ${REF_CURRENT}"
+
+echo ""
+echo "### Testing ${REF_TO_COMPARE}"
+
+git checkout "${REF_TO_COMPARE}"
+
+go test -timeout="${TIMEOUT}" -count="${BENCH_COUNT}" -benchmem -run=NONE -bench=. ./... | tee "${RESULT_TO_COMPARE}"
+
+# Filter benchmark lines, so benchstat can parse the output.
+grep ^Benchmark "${RESULT_TO_COMPARE}" > "${RESULT_TO_COMPARE}".tmp && mv "${RESULT_TO_COMPARE}".tmp "${RESULT_TO_COMPARE}"
+
+echo ""
+echo "### Done testing ${REF_TO_COMPARE}"
+
+git checkout -
+
+echo ""
+echo "### Result"
+echo "old=${REF_TO_COMPARE} new=${REF_CURRENT}"
+
+if [[ "${BENCHSTAT_FORMAT}" == "csv" ]]; then
+  benchstat -format=csv -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" 2>/dev/null 1>"${BENCHSTAT_OUTPUT_FILE}"
+else
+  benchstat -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}"
+fi
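To make the gating logic in the "Validate results under acceptable limit" step easier to follow: the step only looks at lines beginning with + or - (the percentage deltas), the parameter expansion strips everything except digits and dots, and bc decides whether the remaining percentage exceeds MAX_ACCEPTABLE_DIFFERENCE. A self-contained sketch of that check, with a made-up sample value rather than real benchstat output:

    #!/usr/bin/env bash
    # Stand-alone illustration of the threshold check from the workflow above.
    # The sample line is invented; in CI these lines come from the benchstat CSV.
    MAX_ACCEPTABLE_DIFFERENCE=5
    line="+7.31%"
    # "${line//[^0-9.]/}" reduces the row to "7.31"; bc -l prints 1 when it is
    # greater than the limit, which makes the arithmetic test succeed and the
    # step fail the build.
    if (( $(echo "${line//[^0-9.]/}"'>'"$MAX_ACCEPTABLE_DIFFERENCE" | bc -l) )); then
      echo "::error::$line is above the maximum acceptable difference ($MAX_ACCEPTABLE_DIFFERENCE)"
      exit 1
    fi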