name: Performance Regression
on:
  pull_request:
    paths:
      # The workflow only runs if something in these paths changes.
      - 'amazon/**'
      - '.github/workflows/performance-regression.yml' # Also trigger if the workflow itself changes.
env:
  report_statistics: 'file_size,time_mean,time_error,ops/s_mean,ops/s_error,memory_usage_peak'
  compare_statistics: 'file_size,time_mean'
  data_size: '100'
  spec_defaults: '{warmups:100,iterations:100}'
  specs: '{command:read,format:ion_text} {command:write,format:ion_text} {command:read,format:ion_binary} {command:write,format:ion_binary}'
  test_data_id: 'generated-test-data'
  run_cli: 'python src-python/amazon/ionbenchmark/ion_benchmark_cli.py'
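# Note (inferred from context, not from the CLI's documentation): spec_defaults and specs appear to be
# Ion struct literals for the benchmark CLI's `spec` subcommand; each entry in `specs` names one benchmark
# to run (read or write, text or binary Ion), and `spec_defaults` supplies the warmup and iteration counts
# applied to every spec.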
jobs:
  generate-test-data:
    name: Generate Data
    runs-on: ubuntu-latest
    steps:
      # Generates data used for benchmarking
      - name: Checkout ion-data-generator
        uses: actions/checkout@v4
        with:
          repository: amazon-ion/ion-data-generator
          ref: main
      - name: Build ion-data-generator
        run: mvn clean install
      - name: Generate test Ion Data
        env:
          jar_file: target/ion-data-generator-1.0-SNAPSHOT.jar
          schema_dir: tst/com/amazon/ion/workflow
        run: |
          mkdir -p testData
          for test in nestedStruct nestedList sexp realWorldDataSchema01 realWorldDataSchema02 realWorldDataSchema03
          do
            java -jar "$jar_file" generate -S "${{env.data_size}}" --input-ion-schema "$schema_dir/${test}.isl" "testData/${test}.10n"
          done
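      # Each schema produces testData/<name>.10n; these names must match the `test-data` matrix values
      # in the detect-regression job below.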
      - name: Upload test Ion Data to artifacts
        uses: actions/upload-artifact@v4
        with:
          name: ${{env.test_data_id}}
          path: testData
  prepopulate-pip-cache:
    # The "Check" jobs all start at the same time, so none of them would see a cache populated by another.
    # To speed things up, this job runs in parallel with the "Generate Data" job and fills the pip cache
    # before any "Check" job needs it.
    name: Setup PIP Cache
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.13'
          cache: 'pip'
          cache-dependency-path: pyproject.toml
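      # setup-python's pip cache is keyed on the runner OS, the Python version, and a hash of
      # pyproject.toml, so the "Check" jobs below (same version, same dependency file) should restore
      # this cache rather than re-download everything.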
      - run: |
          # Installing the package with its extras resolves and downloads every dependency,
          # which populates the pip cache for the "Check" jobs.
          python -m pip install '.[test,benchmarking]'
  detect-regression:
    name: Check
    runs-on: ubuntu-latest
    needs: [generate-test-data, prepopulate-pip-cache]
    strategy:
      matrix:
        python-version: ['3.13']
        test-data: ['nestedStruct', 'nestedList', 'sexp', 'realWorldDataSchema01', 'realWorldDataSchema02', 'realWorldDataSchema03']
      fail-fast: false
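    # One "Check" job runs per generated test file; fail-fast is disabled so a failure or regression
    # against one file does not cancel the checks for the others.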
    steps:
      - name: Checkout the base of the PR
        uses: actions/checkout@v4
        with:
          ref: ${{ github.base_ref }}
          submodules: recursive
          path: baseline
      - name: Checkout the head of the PR
        uses: actions/checkout@v4
        with:
          repository: ${{ github.event.pull_request.head.repo.full_name }}
          ref: ${{ github.head_ref }}
          submodules: recursive
          path: new
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: pyproject.toml
      - name: Download Test Data
        id: 'download'
        uses: actions/download-artifact@v4
        with:
          name: ${{env.test_data_id}}
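      # The download step above exposes a `download-path` output (referenced as
      # steps.download.outputs.download-path); the benchmark steps below use it to locate the
      # generated .10n file for this matrix entry.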
      # Generate performance results for the base of the PR (the baseline)
      - name: Install the baseline package and its dependencies
        working-directory: ./baseline
        run: pip install '.[test,benchmarking]'
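      # In the benchmark invocations below, -O overrides each spec's input_file with the downloaded test
      # data, -o sets the output report path, and -r/-c select the statistics to report or compare; this
      # reading is inferred from the variable names rather than from the CLI's documentation.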
      - name: Run baseline performance benchmark
        id: 'baseline'
        working-directory: ./baseline
        run: |
          ${{env.run_cli}} spec '${{env.specs}}' -d '${{env.spec_defaults}}' \
            -O '{input_file:"${{steps.download.outputs.download-path}}/${{ matrix.test-data }}.10n"}' \
            -o "$PWD/report.ion" -r '${{env.report_statistics}}'
          echo "::group::Ion Report"
          echo "$(<report.ion)"
          echo "::endgroup::"
          echo "report=$PWD/report.ion" >> "$GITHUB_OUTPUT"
      # Generate performance results for the head of the PR (the proposed changes)
      - name: Install the PR's package and its dependencies
        working-directory: ./new
        run: pip install '.[test,benchmarking]'
      - name: Run new performance benchmark
        id: 'new'
        working-directory: ./new
        run: |
          ${{env.run_cli}} spec '${{env.specs}}' -d '${{env.spec_defaults}}' \
            -O '{input_file:"${{steps.download.outputs.download-path}}/${{ matrix.test-data }}.10n"}' \
            -o "$PWD/report.ion" -r '${{env.report_statistics}}'
          echo "::group::Ion Report"
          echo "$(<report.ion)"
          echo "::endgroup::"
          echo "report=$PWD/report.ion" >> "$GITHUB_OUTPUT"
      # Compare results and identify regression
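      # The --fail flag presumably makes the CLI exit with a nonzero status when it detects a regression
      # in the compared statistics, which fails this job and therefore the PR check.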
      - name: Detect performance regression
        working-directory: ./new
        run: ${{env.run_cli}} compare --fail ${{steps.baseline.outputs.report}} ${{steps.new.outputs.report}} -c '${{env.compare_statistics}}'