@@ -22,81 +22,105 @@ mkdir -p ${RESULTS_DIR}
 
 # Run nexmark benchmark
 MAX_EVENTS=100000000
-GENERATORS=8
-CORES=6
 if [ "$SMOKE" != "" ]; then
     MAX_EVENTS=1000000
 fi
 
-FILES=("q0" "q1" "q2" "q3" "q4" "q5" "q6" "q7" "q8" "q9" "q12" "q13" "q14" "q15" "q16" "q17" "q18" "q19" "q20" "q21" "q22")
-for FILE in "${FILES[@]}"
-do cargo bench --bench nexmark -- --max-events=${MAX_EVENTS} --cpu-cores ${CORES} --num-event-generators ${GENERATORS} --source-buffer-size 10000 --input-batch-size 40000 --csv ${NEXMARK_CSV_FILE} --query $FILE
-done
-mkdir -p ${NEXMARK_RESULTS_DIR}
-mv crates/nexmark/${NEXMARK_CSV_FILE} $NEXMARK_RESULTS_DIR
+if [ "$CLOUD" = "" ]; then
+    GENERATORS=8
+    CORES=6
+    FILES=("q0" "q1" "q2" "q3" "q4" "q5" "q6" "q7" "q8" "q9" "q12" "q13" "q14" "q15" "q16" "q17" "q18" "q19" "q20" "q21" "q22")
+    for FILE in "${FILES[@]}"
+    do cargo bench --bench nexmark -- --max-events=${MAX_EVENTS} --cpu-cores ${CORES} --num-event-generators ${GENERATORS} --source-buffer-size 10000 --input-batch-size 40000 --csv ${NEXMARK_CSV_FILE} --query $FILE
+    done
+    mkdir -p ${NEXMARK_RESULTS_DIR}
+    mv crates/nexmark/${NEXMARK_CSV_FILE} $NEXMARK_RESULTS_DIR
+fi
 
-# Run nexmark SQL benchmark
-# This test requires a running instance of redpanda and pipeline-manager.
+# Run SQL benchmarks
+# These require a running instance of redpanda (if they don't use the nexmark connector) and pipeline-manager.
 # The Earthfile should run those.
-# 100M events causes out of memory problems with SQL tests
-MAX_EVENTS=100000000
-if [ "$SMOKE" != "" ]; then
-    MAX_EVENTS=1000000
-fi
-KAFKA_BROKER=localhost:9092
 
+KAFKA_BROKER=localhost:9092
 FELDERA_API=http://localhost:8080
+
+if [ "$CLOUD" != "" ]; then
+    FELDERA_API=$API_URL
+    KAFKA_BROKER='${secret:demo-bootstrap-servers}'
+    CLOUD_OPTIONS='-O security.protocol=${secret:demo-security-protocol}
+    -O ssl.ca.pem=${secret:demo-ssl-ca-pem}
+    -O ssl.certificate.pem=${secret:demo-ssl-certificate-pem}
+    -O ssl.key.pem=${secret:demo-ssl-key-pem}
+    -O ssl.key.password=${secret:demo-ssl-key-password}
+    -O ssl.endpoint.identification.algorithm=${secret:demo-ssl-endpoint-identification-algorithm}
+    -O sasl.mechanism=${secret:demo-sasl-mechanism}
+    -O sasl.username=${secret:demo-sasl-username}
+    -O sasl.password=${secret:demo-sasl-password}
+    --api-key '${API_KEY}
+fi
+
 sql_benchmark () {
     mkdir -p $RESULTS_DIR/$name
     local csv=$1 metrics=$2; shift; shift
     python3 benchmark/feldera-sql/run.py \
-      --api-url $FELDERA_API \
-      --events $MAX_EVENTS \
-      -O bootstrap.servers=$KAFKA_BROKER \
-      --csv "$RESULTS_DIR/$name/$csv" \
-      --csv-metrics "$RESULTS_DIR/$name/$metrics" \
-      --metrics-interval 1 \
-      --poller-threads 10 \
-      "$@"
+        --api-url $FELDERA_API \
+        --events $MAX_EVENTS \
+        -O bootstrap.servers=$KAFKA_BROKER \
+        --csv "$RESULTS_DIR/$name/$csv" \
+        --csv-metrics "$RESULTS_DIR/$name/$metrics" \
+        --metrics-interval 1 \
+        --poller-threads 10 \
+        "$@"
 }
 
 DIR="benchmark/feldera-sql/benchmarks/"
-for test in $DIR/*; do
+TESTS=${DIR}/*
+if [ "$CLOUD" != "" ]; then
+    TESTS=${DIR}/nexmark
+fi
+
+for test in ${TESTS}; do
     if test -e ${test}/generate.bash; then
         rpk topic -X brokers=$KAFKA_BROKER delete -r '.*'
         source ${test}/generate.bash
     fi
     name=$(basename $test)
     sql_benchmark "sql_${name}_results.csv" "sql_${name}_metrics.csv" --folder benchmarks/${name}
-    sql_benchmark "sql_storage_${name}_results.csv" "sql_storage_${name}_metrics.csv" --storage --folder benchmarks/${name}
+    # We currently skip running the storage benchmarks on cloud until we can
+    # get a better disk for cloud.
91+ if [ " $CLOUD " = " " ]; then
92+ sql_benchmark " sql_storage_${name} _results.csv" " sql_storage_${name} _metrics.csv" --storage --folder benchmarks/${name}
93+ fi
7294done
7395
74- # Run galen benchmark
75- cargo bench --bench galen -- --workers 10 --csv ${GALEN_CSV_FILE}
76- mkdir -p ${GALEN_RESULTS_DIR}
77- mv crates/dbsp/${GALEN_CSV_FILE} ${GALEN_RESULTS_DIR}
96+ if [ " $CLOUD " = " " ]; then
97+ # Run galen benchmark
98+ cargo bench --bench galen -- --workers 10 --csv ${GALEN_CSV_FILE}
99+ mkdir -p ${GALEN_RESULTS_DIR}
100+ mv crates/dbsp/${GALEN_CSV_FILE} ${GALEN_RESULTS_DIR}
78101
79- # Run ldbc benchmarks
80- DATASET_SMALL=' graph500-22'
81- DATASET_MEDIUM=' datagen-8_4-fb'
82- if [ " $SMOKE " != " " ]; then
83- DATASET_SMALL=' wiki-Talk'
84- DATASET_MEDIUM=' kgs'
85- fi
86- # cargo bench --bench ldbc-graphalytics -- bfs ${DATASET_SMALL} --threads 1 --csv ${LDBC_CSV_FILE}
87- # cargo bench --bench ldbc-graphalytics -- bfs ${DATASET_MEDIUM} --threads 6 --csv ${LDBC_CSV_FILE}
88- # cargo bench --bench ldbc-graphalytics -- pagerank ${DATASET_SMALL} --threads 1 --csv ${LDBC_CSV_FILE}
89- # cargo bench --bench ldbc-graphalytics -- pagerank ${DATASET_MEDIUM} --threads 6 --csv ${LDBC_CSV_FILE}
90- # mkdir -p ${LDBC_RESULTS_DIR}
91- # mv crates/dbsp/${LDBC_CSV_FILE} ${LDBC_RESULTS_DIR}
102+ # Run ldbc benchmarks
103+ DATASET_SMALL=' graph500-22'
104+ DATASET_MEDIUM=' datagen-8_4-fb'
105+ if [ " $SMOKE " != " " ]; then
106+ DATASET_SMALL=' wiki-Talk'
107+ DATASET_MEDIUM=' kgs'
108+ fi
109+ # cargo bench --bench ldbc-graphalytics -- bfs ${DATASET_SMALL} --threads 1 --csv ${LDBC_CSV_FILE}
110+ # cargo bench --bench ldbc-graphalytics -- bfs ${DATASET_MEDIUM} --threads 6 --csv ${LDBC_CSV_FILE}
111+ # cargo bench --bench ldbc-graphalytics -- pagerank ${DATASET_SMALL} --threads 1 --csv ${LDBC_CSV_FILE}
112+ # cargo bench --bench ldbc-graphalytics -- pagerank ${DATASET_MEDIUM} --threads 6 --csv ${LDBC_CSV_FILE}
113+ # mkdir -p ${LDBC_RESULTS_DIR}
114+ # mv crates/dbsp/${LDBC_CSV_FILE} ${LDBC_RESULTS_DIR}
92115
93- # Run nexmark benchmark with persistence
94- MAX_EVENTS=3000000
95- CORES=1
96- if [ " $SMOKE " != " " ]; then
97- MAX_EVENTS=100000
98- fi
99- cargo bench --bench nexmark -- --max-events=${MAX_EVENTS} --cpu-cores ${CORES} --num-event-generators 6 --source-buffer-size 10000 --input-batch-size 40000 --csv ${NEXMARK_DRAM_CSV_FILE}
100- mv crates/nexmark/${NEXMARK_DRAM_CSV_FILE} $NEXMARK_RESULTS_DIR
101- # cargo bench --bench nexmark --features persistence -- --max-events=${MAX_EVENTS} --cpu-cores ${CORES} --num-event-generators 6 --source-buffer-size 10000 --input-batch-size 40000 --csv ${NEXMARK_PERSISTENCE_CSV_FILE}
102- # mv crates/nexmark/${NEXMARK_PERSISTENCE_CSV_FILE} $NEXMARK_RESULTS_DIR
116+ # Run nexmark benchmark with persistence
117+ MAX_EVENTS=3000000
118+ CORES=1
119+ if [ " $SMOKE " != " " ]; then
120+ MAX_EVENTS=100000
121+ fi
122+ cargo bench --bench nexmark -- --max-events=${MAX_EVENTS} --cpu-cores ${CORES} --num-event-generators 6 --source-buffer-size 10000 --input-batch-size 40000 --csv ${NEXMARK_DRAM_CSV_FILE}
123+ mv crates/nexmark/${NEXMARK_DRAM_CSV_FILE} $NEXMARK_RESULTS_DIR
124+ # cargo bench --bench nexmark --features persistence -- --max-events=${MAX_EVENTS} --cpu-cores ${CORES} --num-event-generators 6 --source-buffer-size 10000 --input-batch-size 40000 --csv ${NEXMARK_PERSISTENCE_CSV_FILE}
125+ # mv crates/nexmark/${NEXMARK_PERSISTENCE_CSV_FILE} $NEXMARK_RESULTS_DIR
126+ fi
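
For reference, every branch this change adds is selected through environment variables the script reads ($CLOUD, $API_URL, $API_KEY, plus the pre-existing $SMOKE). A minimal sketch of how the two modes might be invoked; the script path and the placeholder values below are illustrative assumptions, not taken from this diff:

# Local run (script name assumed; adjust to wherever this script lives in the repo).
# SMOKE trims the event counts; leaving CLOUD unset keeps the full local benchmark set.
SMOKE=1 ./run-benchmarks.sh

# Cloud run: only the feldera-sql nexmark benchmark runs; the storage, galen,
# ldbc, and persistence nexmark runs are skipped.
CLOUD=1 \
API_URL="https://<feldera-cloud-instance>" \
API_KEY="<api-key>" \
./run-benchmarks.sh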