| 1 | // Copyright 2014 The Flutter Authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | import 'dart:convert'; |
| 6 | import 'dart:io'; |
| 7 | |
| 8 | import 'package:collection/collection.dart' ; |
| 9 | import 'package:metrics_center/metrics_center.dart' ; |
| 10 | |
/// Authenticate and connect to gcloud storage.
///
/// It supports both token and credential authentications.
Future<FlutterDestination> connectFlutterDestination() async {
  final Map<String, String> env = Platform.environment;
  final bool isTesting = env['IS_TESTING'] == 'true';
  final String? tokenPath = env['TOKEN_PATH'];
  final String? gcpProject = env['GCP_PROJECT'];
  // Token-based auth is used when both the token file path and the GCP
  // project are supplied; otherwise fall back to a credentials JSON blob.
  if (tokenPath != null && gcpProject != null) {
    return FlutterDestination.makeFromAccessToken(
      File(tokenPath).readAsStringSync(),
      gcpProject,
      isTesting: isTesting,
    );
  }
  return FlutterDestination.makeFromCredentialsJson(
    jsonDecode(env['BENCHMARK_GCP_CREDENTIALS']!) as Map<String, dynamic>,
    isTesting: isTesting,
  );
}
| 29 | |
/// Parse results and append additional benchmark tags into Metric Points.
///
/// An example of `resultsJson`:
///   {
///     "CommitBranch": "master",
///     "CommitSha": "abc",
///     "BuilderName": "test",
///     "ResultData": {
///       "average_frame_build_time_millis": 0.4550425531914895,
///       "90th_percentile_frame_build_time_millis": 0.473
///     },
///     "BenchmarkScoreKeys": [
///       "average_frame_build_time_millis",
///       "90th_percentile_frame_build_time_millis"
///     ]
///   }
///
/// An example of `benchmarkTags`:
///   {
///     "arch": "intel",
///     "device_type": "Moto G Play",
///     "device_version": "android-25",
///     "host_type": "linux",
///     "host_version": "debian-10.11"
///   }
List<MetricPoint> parse(
  Map<String, dynamic> resultsJson,
  Map<String, dynamic> benchmarkTags,
  String taskName,
) {
  print('Results to upload to skia perf: $resultsJson');
  print('Benchmark tags to upload to skia perf: $benchmarkTags');
  final List<String> scoreKeys =
      (resultsJson['BenchmarkScoreKeys'] as List<dynamic>?)?.cast<String>() ?? const <String>[];
  final Map<String, dynamic> resultData =
      resultsJson['ResultData'] as Map<String, dynamic>? ?? const <String, dynamic>{};
  final String gitBranch = (resultsJson['CommitBranch'] as String).trim();
  final String gitSha = (resultsJson['CommitSha'] as String).trim();
  // Convert the benchmark tags to strings once: they are identical for every
  // score key, so doing this inside the loop would repeat loop-invariant work.
  // These tags surface in Skia Perf dashboards and, being spread last below,
  // override the base tags on key collision (same semantics as `mergeMaps`).
  final Map<String, String> extraTags = benchmarkTags.map(
    (String key, dynamic value) => MapEntry<String, String>(key, value.toString()),
  );
  final List<MetricPoint> metricPoints = <MetricPoint>[];
  for (final String scoreKey in scoreKeys) {
    final Map<String, String> tags = <String, String>{
      kGithubRepoKey: kFlutterFrameworkRepo,
      kGitRevisionKey: gitSha,
      'branch': gitBranch,
      kNameKey: taskName,
      kSubResultKey: scoreKey,
      ...extraTags,
    };
    metricPoints.add(MetricPoint((resultData[scoreKey] as num).toDouble(), tags));
  }
  return metricPoints;
}
| 88 | |
/// Upload metrics to GCS bucket used by Skia Perf.
///
/// Skia Perf picks up all available files under the folder, and
/// is robust to duplicate entries.
///
/// Files will be named based on `taskName`, such as
/// `complex_layout_scroll_perf__timeline_summary_values.json`.
/// If no `taskName` is specified, data will be saved to
/// `default_values.json`.
Future<void> upload(
  FlutterDestination metricsDestination,
  List<MetricPoint> metricPoints,
  int commitTimeSinceEpoch,
  String taskName,
) async {
  // The destination expects a UTC commit timestamp.
  final DateTime commitTime = DateTime.fromMillisecondsSinceEpoch(
    commitTimeSinceEpoch,
    isUtc: true,
  );
  await metricsDestination.update(metricPoints, commitTime, taskName);
}
| 110 | |
/// Upload JSON results to skia perf.
///
/// Flutter infrastructure's workflow is:
/// 1. Run DeviceLab test, writing results to a known path
/// 2. Request service account token from luci auth (valid for at least 3 minutes)
/// 3. Upload results from (1) to skia perf.
///
/// Does nothing when `resultsPath` is null. `commitTime` is seconds since
/// epoch; when absent, the current time is used. `benchmarkTags` is a JSON
/// object string; when absent, no extra tags are applied.
Future<void> uploadToSkiaPerf(
  String? resultsPath,
  String? commitTime,
  String? taskName,
  String? benchmarkTags,
) async {
  // Nothing to upload.
  if (resultsPath == null) {
    return;
  }
  // Convert seconds to milliseconds; fall back to "now" when no commit time
  // is provided.
  final int commitTimeSinceEpoch =
      commitTime != null ? 1000 * int.parse(commitTime) : DateTime.now().millisecondsSinceEpoch;
  taskName ??= 'default';
  final Map<String, dynamic> benchmarkTagsMap =
      jsonDecode(benchmarkTags ?? '{}') as Map<String, dynamic>;
  final Map<String, dynamic> resultsJson =
      jsonDecode(await File(resultsPath).readAsString()) as Map<String, dynamic>;
  final List<MetricPoint> metricPoints = parse(resultsJson, benchmarkTagsMap, taskName);
  final FlutterDestination metricsDestination = await connectFlutterDestination();
  await upload(
    metricsDestination,
    metricPoints,
    commitTimeSinceEpoch,
    metricFileName(taskName, benchmarkTagsMap),
  );
}
| 147 | |
/// Create metric file name based on `taskName`, `arch`, `host type`, and `device type`.
///
/// The same `taskName` may run on different platforms, so the host/device tags
/// are appended to produce a distinct metric file name per platform.
///
/// This affects only the metric file name which contains metric data, and does
/// not affect real host/device tags.
///
/// For example:
///   Old file name: `backdrop_filter_perf__timeline_summary`
///   New file name: `backdrop_filter_perf__timeline_summary_intel_linux_motoG4`
String metricFileName(String taskName, Map<String, dynamic> benchmarkTagsMap) {
  // Tags appended to the file name, in this fixed order.
  const List<String> tagKeys = <String>['arch', 'host_type', 'device_type'];
  final StringBuffer fileName = StringBuffer(taskName);
  for (final String key in tagKeys) {
    if (benchmarkTagsMap.containsKey(key)) {
      fileName
        ..write('_')
        ..write(_fileNameFormat(benchmarkTagsMap[key] as String));
    }
  }
  return fileName.toString();
}
| 178 | |
/// Format `fileName` removing non letter and number characters.
String _fileNameFormat(String fileName) {
  // Strip every character outside [a-zA-Z0-9].
  final RegExp nonAlphanumeric = RegExp(r'[^a-zA-Z0-9]');
  return fileName.replaceAll(nonAlphanumeric, '');
}
| 183 | |