//go:build !slim

package cli

import (
	"context"
	"fmt"
	"net/http"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"golang.org/x/xerrors"

	"cdr.dev/slog/v3"
	"cdr.dev/slog/v3/sloggers/sloghuman"

	"github.com/coder/coder/v2/codersdk"
	"github.com/coder/coder/v2/scaletest/harness"
	"github.com/coder/coder/v2/scaletest/loadtestutil"
	"github.com/coder/coder/v2/scaletest/taskstatus"
	"github.com/coder/serpent"
)

const (
	taskStatusTestName = "task-status"
)
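
// scaletestTaskStatus returns the `task-status` scale test subcommand. It
// creates --count taskstatus runners, waits for every runner to connect,
// holds a baseline period, then signals all runners to start reporting task
// status concurrently.
//
// A sketch of an invocation (assuming the command is registered under
// `exp scaletest`, as the filename suggests):
//
//	coder exp scaletest task-status \
//		--count 50 \
//		--template scaletest-task-status \
//		--baseline-duration 10m \
//		--report-status-duration 15m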
func (r *RootCmd) scaletestTaskStatus() *serpent.Command {
	var (
		count                int64
		template             string
		workspaceNamePrefix  string
		appSlug              string
		reportStatusPeriod   time.Duration
		reportStatusDuration time.Duration
		baselineDuration     time.Duration
		tracingFlags         = &scaletestTracingFlags{}
		prometheusFlags      = &scaletestPrometheusFlags{}
		timeoutStrategy      = &timeoutFlags{}
		cleanupStrategy      = newScaletestCleanupStrategy()
		output               = &scaletestOutputFlags{}
	)

	orgContext := NewOrganizationContext()

	cmd := &serpent.Command{
		Use:   "task-status",
		Short: "Generates load on the Coder server by simulating task status reporting",
		Long: `This test creates external workspaces and simulates AI agents reporting task status.
After all runners connect, it waits for the baseline duration before triggering status reporting.`,
		Handler: func(inv *serpent.Invocation) error {
			ctx := inv.Context()

			outputs, err := output.parse()
			if err != nil {
				return xerrors.Errorf("could not parse --output flags: %w", err)
			}

			client, err := r.InitClient(inv)
			if err != nil {
				return err
			}

			org, err := orgContext.Selected(inv, client)
			if err != nil {
				return err
			}

			_, err = requireAdmin(ctx, client)
			if err != nil {
				return err
			}

			// Disable rate limits for this test.
			client.HTTPClient = &http.Client{
				Transport: &codersdk.HeaderTransport{
					Transport: http.DefaultTransport,
					Header: map[string][]string{
						codersdk.BypassRatelimitHeader: {"true"},
					},
				},
			}

			// Find the template.
			tpl, err := parseTemplate(ctx, client, []uuid.UUID{org.ID}, template)
			if err != nil {
				return xerrors.Errorf("parse template %q: %w", template, err)
			}
			templateID := tpl.ID
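
			// Register the test's metrics and serve them via an embedded
			// Prometheus endpoint so they can be scraped while the test
			// runs; a debug-level human-readable logger goes to stdout.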
			reg := prometheus.NewRegistry()
			metrics := taskstatus.NewMetrics(reg)

			logger := slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelDebug)
			prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
			defer prometheusSrvClose()
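
			// Set up tracing; closeTracing is a no-op when tracing is not
			// enabled.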
			tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
			if err != nil {
				return xerrors.Errorf("create tracer provider: %w", err)
			}
			defer func() {
				// Allow time for traces to flush even if the command context
				// is canceled. This is a no-op if tracing is not enabled.
				_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
				if err := closeTracing(ctx); err != nil {
					_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
				}
				// Wait for Prometheus metrics to be scraped.
				_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
				<-time.After(prometheusFlags.Wait)
			}()
			tracer := tracerProvider.Tracer(scaletestTracerName)

			// Set up shared resources for coordination: every runner
			// decrements the wait group once it has connected, and closing
			// startReporting releases all runners at once.
			connectedWaitGroup := &sync.WaitGroup{}
			connectedWaitGroup.Add(int(count))
			startReporting := make(chan struct{})

			// Create the test harness.
			th := harness.NewTestHarness(
				timeoutStrategy.wrapStrategy(harness.ConcurrentExecutionStrategy{}),
				cleanupStrategy.toStrategy(),
			)
			// Create runners.
			for i := range count {
				workspaceName := fmt.Sprintf("%s-%d", workspaceNamePrefix, i)
				cfg := taskstatus.Config{
					TemplateID:           templateID,
					WorkspaceName:        workspaceName,
					AppSlug:              appSlug,
					ConnectedWaitGroup:   connectedWaitGroup,
					StartReporting:       startReporting,
					ReportStatusPeriod:   reportStatusPeriod,
					ReportStatusDuration: reportStatusDuration,
					Metrics:              metrics,
					MetricLabelValues:    []string{},
				}
				if err := cfg.Validate(); err != nil {
					return xerrors.Errorf("validate config for runner %d: %w", i, err)
				}

				// Use an independent client for each runner so they don't
				// reuse TCP connections; connection reuse can lead to
				// requests being unbalanced among Coder instances.
				runnerClient, err := loadtestutil.DupClientCopyingHeaders(client, BypassHeader)
				if err != nil {
					return xerrors.Errorf("create runner client: %w", err)
				}

				var runner harness.Runnable = taskstatus.NewRunner(runnerClient, cfg)
				if tracingEnabled {
					runner = &runnableTraceWrapper{
						tracer:   tracer,
						spanName: fmt.Sprintf("%s/%d", taskStatusTestName, i),
						runner:   runner,
					}
				}
				th.AddRun(taskStatusTestName, workspaceName, runner)
			}
			// Start the test in a separate goroutine so we can coordinate timing.
			testCtx, testCancel := timeoutStrategy.toContext(ctx)
			defer testCancel()
			testDone := make(chan error)
			go func() {
				testDone <- th.Run(testCtx)
			}()

			// Wait for all runners to connect.
			logger.Info(ctx, "waiting for all runners to connect")
			waitCtx, waitCancel := context.WithTimeout(ctx, 5*time.Minute)
			defer waitCancel()
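			// Bridge the wait group to a channel so it can be selected on
			// alongside the timeout context.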
			connectDone := make(chan struct{})
			go func() {
				connectedWaitGroup.Wait()
				close(connectDone)
			}()
			select {
			case <-waitCtx.Done():
				return xerrors.New("timeout waiting for runners to connect")
			case <-connectDone:
				logger.Info(ctx, "all runners connected")
			}

			// Wait for the baseline duration.
			logger.Info(ctx, "waiting for baseline duration", slog.F("duration", baselineDuration))
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(baselineDuration):
			}

			// Trigger all runners to start reporting.
			logger.Info(ctx, "triggering runners to start reporting task status")
			close(startReporting)

			// Wait for the test to complete.
			err = <-testDone
			if err != nil {
				return xerrors.Errorf("run test harness: %w", err)
			}
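
			// Write the harness results in every requested output format.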
			res := th.Results()
			for _, o := range outputs {
				err = o.write(res, inv.Stdout)
				if err != nil {
					return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
				}
			}
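
			// Clean up all test runs using the configured cleanup strategy.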
			cleanupCtx, cleanupCancel := cleanupStrategy.toContext(ctx)
			defer cleanupCancel()
			err = th.Cleanup(cleanupCtx)
			if err != nil {
				return xerrors.Errorf("cleanup tests: %w", err)
			}

			if res.TotalFail > 0 {
				return xerrors.New("load test failed, see above for more details")
			}
			return nil
		},
	}
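
	// Flags specific to this subcommand; shared scale test flags
	// (organization, output, tracing, Prometheus, timeout, and cleanup)
	// are attached below.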
	cmd.Options = serpent.OptionSet{
		{
			Flag:        "count",
			Description: "Number of concurrent runners to create.",
			Default:     "10",
			Value:       serpent.Int64Of(&count),
		},
		{
			Flag:        "template",
			Description: "Name or UUID of the template to use for the scale test. The template MUST include a coder_external_agent and a coder_app.",
			Default:     "scaletest-task-status",
			Value:       serpent.StringOf(&template),
		},
		{
			Flag:        "workspace-name-prefix",
			Description: "Prefix for workspace names (will be suffixed with the runner index).",
			Default:     "scaletest-task-status",
			Value:       serpent.StringOf(&workspaceNamePrefix),
		},
		{
			Flag:        "app-slug",
			Description: "Slug of the app designated as the AI agent.",
			Default:     "ai-agent",
			Value:       serpent.StringOf(&appSlug),
		},
		{
			Flag:        "report-status-period",
			Description: "Time between reporting task statuses.",
			Default:     "10s",
			Value:       serpent.DurationOf(&reportStatusPeriod),
		},
		{
			Flag:        "report-status-duration",
			Description: "Total time to report task statuses after the baseline period.",
			Default:     "15m",
			Value:       serpent.DurationOf(&reportStatusDuration),
		},
		{
			Flag:        "baseline-duration",
			Description: "Duration to wait after all runners connect before starting to report status.",
			Default:     "10m",
			Value:       serpent.DurationOf(&baselineDuration),
		},
	}

	orgContext.AttachOptions(cmd)
	output.attach(&cmd.Options)
	tracingFlags.attach(&cmd.Options)
	prometheusFlags.attach(&cmd.Options)
	timeoutStrategy.attach(&cmd.Options)
	cleanupStrategy.attach(&cmd.Options)

	return cmd
}