|
1 | | -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. |
| 1 | +# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. |
2 | 2 | # |
3 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); |
4 | 4 | # you may not use this file except in compliance with the License. |
|
17 | 17 | import argparse |
18 | 18 |
|
19 | 19 |
|
def process_performance_stats(timestamps, batch_size, mode):
    """Summarize latency and throughput statistics for a benchmark run.

    :param timestamps: Collection (numpy array) of per-batch wall-clock
        times — assumed to be in seconds (scaled by 1000 to ms below);
        TODO(review) confirm units against the caller
    :param batch_size: Number of samples per batch
    :param mode: Estimator's execution mode; only used to tag the keys of
        the returned dict (e.g. "latency_train_mean")
    :return: Dict with mean throughput (samples/sec), mean latency (ms),
        and 90th/95th/99th latency percentiles (ms)
    """
    timestamps_ms = 1000 * timestamps
    # Mean of per-batch instantaneous throughputs, in samples per second.
    throughput_imgps = (1000.0 * batch_size / timestamps_ms).mean()
    stats = {f"throughput_{mode}": throughput_imgps,
             f"latency_{mode}_mean": timestamps_ms.mean()}
    # Tail latencies are empirical percentiles, not parametric confidence
    # intervals — the key names must match downstream log consumers.
    for level in [90, 95, 99]:
        stats[f"latency_{mode}_{level}"] = np.percentile(timestamps_ms, level)
    return stats
33 | 36 |
|
34 | 37 |
|
@@ -77,4 +80,3 @@ def parse_convergence_results(path, environment): |
77 | 80 | parse_convergence_results(path=args.model_dir, environment=args.env) |
78 | 81 | elif args.exec_mode == 'benchmark': |
79 | 82 | pass |
80 | | - print() |
|
0 commit comments