Mirror of https://github.com/godotengine/godot-angle-static.git (synced 2026-01-06 02:09:55 +03:00)
Add missing perf results merger scripts.
While waiting for the git mirror to become available we need to clone these scripts into ANGLE. These scripts are required for the merge step to work properly.

Bug: angleproject:5114
Change-Id: Ie917149972c8339419b1e3a6d0cf6712fb9888d5
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2877011
Commit-Queue: Jamie Madill <jmadill@chromium.org>
Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
487
tools/perf/core/results_dashboard.py
Executable file
@@ -0,0 +1,487 @@
#!/usr/bin/env vpython
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for adding results to perf dashboard."""

# This file was copy-pasted over from:
# //build/scripts/slave/results_dashboard.py
# That file is now deprecated and this one is
# the new source of truth.

import calendar
import datetime
import httplib
import json
import os
import subprocess
import sys
import time
import traceback
import zlib
import logging
import six.moves.urllib.error  # pylint: disable=import-error
import six.moves.urllib.parse  # pylint: disable=import-error
import six.moves.urllib.request  # pylint: disable=import-error

# TODO(crbug.com/996778): Figure out how to get httplib2 hermetically.
import httplib2  # pylint: disable=import-error

from core import path_util

logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
    ' %(module)s.%(funcName)s:%(lineno)d %(message)s')

# The paths in the results dashboard URLs for sending results.
SEND_RESULTS_PATH = '/add_point'
SEND_HISTOGRAMS_PATH = '/add_histograms'


class SendResultException(Exception):
  pass


class SendResultsRetryException(SendResultException):
  pass


class SendResultsFatalException(SendResultException):
  pass


def LuciAuthTokenGeneratorCallback():
  args = ['luci-auth', 'token']
  p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  if p.wait() == 0:
    return p.stdout.read()
  else:
    raise RuntimeError('Error generating authentication token.\nStdout: %s\nStderr: %s' %
                       (p.stdout.read(), p.stderr.read()))


def SendResults(data,
                data_label,
                url,
                send_as_histograms=False,
                token_generator_callback=LuciAuthTokenGeneratorCallback,
                num_retries=4):
  """Sends results to the Chrome Performance Dashboard.

  This function tries to send the given data to the dashboard.

  Args:
    data: The data to try to send. Must be JSON-serializable.
    data_label: string name of the data to be uploaded. This is only used for
      logging purposes.
    url: Performance Dashboard URL (including schema).
    send_as_histograms: True if result is to be sent to /add_histograms.
    token_generator_callback: a callback for generating the authentication
      token used to upload to the perf dashboard. If
      |token_generator_callback| is not specified, it defaults to
      LuciAuthTokenGeneratorCallback.
    num_retries: Number of times to retry uploading to the perf dashboard upon
      recoverable error.
  """
  start = time.time()
  all_data_uploaded = False
  data_type = ('histogram' if send_as_histograms else 'chartjson')
  dashboard_data_str = json.dumps(data)
  # When the perf dashboard is overloaded, it can take some time to spin up a
  # new instance, so sleep before retrying.
  # For more details, see crbug.com/867379.
  wait_before_next_retry_in_seconds = 15
  for i in xrange(1, num_retries + 1):
    try:
      logging.info('Sending %s result of %s to dashboard (attempt %i out of %i).' %
                   (data_type, data_label, i, num_retries))
      if send_as_histograms:
        _SendHistogramJson(url, dashboard_data_str, token_generator_callback)
      else:
        # TODO(eakuefner): Remove this logic once all bots use histograms.
        _SendResultsJson(url, dashboard_data_str, token_generator_callback)
      all_data_uploaded = True
      break
    except SendResultsRetryException as e:
      logging.error('Error while uploading %s data: %s' % (data_type, str(e)))
      time.sleep(wait_before_next_retry_in_seconds)
      wait_before_next_retry_in_seconds *= 2
    except SendResultsFatalException as e:
      logging.error('Fatal error while uploading %s data: %s' % (data_type, str(e)))
      break
    except Exception:
      logging.error('Unexpected error while uploading %s data: %s' %
                    (data_type, traceback.format_exc()))
      break
  logging.info('Time spent sending results to %s: %s' % (url, time.time() - start))
  return all_data_uploaded


def MakeHistogramSetWithDiagnostics(histograms_file,
                                    test_name,
                                    bot,
                                    buildername,
                                    buildnumber,
                                    project,
                                    buildbucket,
                                    revisions_dict,
                                    is_reference_build,
                                    perf_dashboard_machine_group,
                                    output_dir,
                                    max_bytes=0):
  """Merges Histograms, adds Diagnostics, and batches the results.

  Args:
    histograms_file: input filename
    output_dir: output directory
    max_bytes: If non-zero, tries to produce files no larger than max_bytes.
      (May generate a file that is larger than max_bytes if max_bytes is
      smaller than a single Histogram.)
  """
  add_diagnostics_args = []
  add_diagnostics_args.extend([
      '--benchmarks',
      test_name,
      '--bots',
      bot,
      '--builds',
      buildnumber,
      '--masters',
      perf_dashboard_machine_group,
      '--is_reference_build',
      'true' if is_reference_build else '',
  ])

  if max_bytes:
    add_diagnostics_args.extend(['--max_bytes', max_bytes])

  build_status_url = _MakeBuildStatusUrl(project, buildbucket, buildername, buildnumber)
  if build_status_url:
    add_diagnostics_args.extend(['--build_urls_k', 'Build Status'])
    add_diagnostics_args.extend(['--build_urls_v', build_status_url])

  for k, v in revisions_dict.items():
    add_diagnostics_args.extend((k, v))

  add_diagnostics_args.append(histograms_file)

  # Subprocess only accepts string args.
  add_diagnostics_args = [str(v) for v in add_diagnostics_args]

  add_reserved_diagnostics_path = os.path.join(path_util.GetChromiumSrcDir(), 'third_party',
                                               'catapult', 'tracing', 'bin',
                                               'add_reserved_diagnostics')

  # This script may write multiple files to output_dir.
  output_path = os.path.join(output_dir, test_name + '.json')
  cmd = ([sys.executable, add_reserved_diagnostics_path] + add_diagnostics_args +
         ['--output_path', output_path])
  logging.info(cmd)
  subprocess.check_call(cmd)


def MakeListOfPoints(charts,
                     bot,
                     test_name,
                     project,
                     buildbucket,
                     buildername,
                     buildnumber,
                     supplemental_columns,
                     perf_dashboard_machine_group,
                     revisions_dict=None):
  """Constructs a list of point dictionaries to send.

  The format output by this function is the original format for sending data
  to the perf dashboard.

  Args:
    charts: A dictionary of chart names to chart data, as generated by the
      log processor classes (see process_log_utils.GraphingLogProcessor).
    bot: A string which comes from perf_id, e.g. linux-release.
    test_name: A test suite name, e.g. sunspider.
    buildername: Builder name (for stdio links).
    buildnumber: Build number (for stdio links).
    supplemental_columns: A dictionary of extra data to send with a point.
    perf_dashboard_machine_group: Builder's perf machine group.

  Returns:
    A list of dictionaries in the format accepted by the perf dashboard.
    Each dictionary has the keys "master", "bot", "test", "value", "revision".
    The full details of this format are described at http://goo.gl/TcJliv.
  """
  results = []

  for chart_name, chart_data in sorted(charts.items()):
    point_id, revision_columns = _RevisionNumberColumns(
        revisions_dict if revisions_dict is not None else chart_data, prefix='r_')

    for trace_name, trace_values in sorted(chart_data['traces'].items()):
      is_important = trace_name in chart_data.get('important', [])
      test_path = _TestPath(test_name, chart_name, trace_name)
      result = {
          'master': perf_dashboard_machine_group,
          'bot': bot,
          'test': test_path,
          'revision': point_id,
          'supplemental_columns': {}
      }

      # Add the supplemental_columns values that were passed in after the
      # calculated revision column values so that these can be overwritten.
      result['supplemental_columns'].update(revision_columns)
      result['supplemental_columns'].update(
          _GetBuildStatusUriColumn(project, buildbucket, buildername, buildnumber))
      result['supplemental_columns'].update(supplemental_columns)

      result['value'] = trace_values[0]
      result['error'] = trace_values[1]

      # Add other properties to this result dictionary if available.
      if chart_data.get('units'):
        result['units'] = chart_data['units']
      if is_important:
        result['important'] = True

      results.append(result)

  return results


def MakeDashboardJsonV1(chart_json, revision_dict, test_name, bot, project, buildbucket,
                        buildername, buildnumber, supplemental_dict, is_ref,
                        perf_dashboard_machine_group):
  """Generates Dashboard JSON in the new Telemetry format.

  See http://goo.gl/mDZHPl for more info on the format.

  Args:
    chart_json: A dict containing the telemetry output.
    revision_dict: Dictionary of revisions to include, including "rev",
      which determines the point ID.
    test_name: A test suite name, e.g. sunspider.
    bot: A string which comes from perf_id, e.g. linux-release.
    buildername: Builder name (for stdio links).
    buildnumber: Build number (for stdio links).
    supplemental_dict: A dictionary of extra data to send with a point;
      this includes revisions and annotation data.
    is_ref: True if this is a reference build, False otherwise.
    perf_dashboard_machine_group: Builder's perf machine group.

  Returns:
    A dictionary in the format accepted by the perf dashboard.
  """
  if not chart_json:
    logging.error('Error: No json output from telemetry.')
    logging.error('@@@STEP_FAILURE@@@')

  point_id, versions = _RevisionNumberColumns(revision_dict, prefix='')

  supplemental = {}
  for key in supplemental_dict:
    if key.startswith('r_'):
      versions[key.replace('r_', '', 1)] = supplemental_dict[key]
    if key.startswith('a_'):
      supplemental[key.replace('a_', '', 1)] = supplemental_dict[key]

  supplemental.update(_GetBuildStatusUriColumn(project, buildbucket, buildername, buildnumber))

  # TODO(sullivan): The android recipe sends "test_name.reference"
  # while the desktop one just sends "test_name" for ref builds. Need
  # to figure out why.
  # https://github.com/catapult-project/catapult/issues/2046
  test_name = test_name.replace('.reference', '')

  fields = {
      'master': perf_dashboard_machine_group,
      'bot': bot,
      'test_suite_name': test_name,
      'point_id': point_id,
      'supplemental': supplemental,
      'versions': versions,
      'chart_data': chart_json,
      'is_ref': is_ref,
  }
  return fields


def _MakeBuildStatusUrl(project, buildbucket, buildername, buildnumber):
  if not (buildername and buildnumber):
    return None
  if not project:
    project = 'chrome'
  if not buildbucket:
    buildbucket = 'ci'
  return 'https://ci.chromium.org/ui/p/%s/builders/%s/%s/%s' % (
      six.moves.urllib.parse.quote(project), six.moves.urllib.parse.quote(buildbucket),
      six.moves.urllib.parse.quote(buildername), six.moves.urllib.parse.quote(str(buildnumber)))


def _GetBuildStatusUriColumn(project, buildbucket, buildername, buildnumber):
  """Gets a supplemental column containing the buildbot status link."""
  url = _MakeBuildStatusUrl(project, buildbucket, buildername, buildnumber)
  if not url:
    return {}
  return _CreateLinkColumn('build_uri', 'Buildbot status page', url)


def _CreateLinkColumn(name, label, url):
  """Returns a column containing a markdown link to show on the dashboard."""
  return {'a_' + name: '[%s](%s)' % (label, url)}


def _GetTimestamp():
  """Gets the Unix timestamp for the current time."""
  return int(calendar.timegm(datetime.datetime.utcnow().utctimetuple()))


def _RevisionNumberColumns(data, prefix):
  """Gets the point id and revision-related columns from the given data.

  Args:
    data: A dict of information from one line of the log file.
    prefix: Prefix for revision type keys. 'r_' for non-telemetry JSON, '' for
      telemetry JSON.

  Returns:
    A tuple with the point id (which must be an int), and a dict of
    revision-related columns.
  """
  revision_supplemental_columns = {}

  # The dashboard requires points' x-values to be integers, and points are
  # ordered by these x-values. If data['rev'] can't be parsed as an int, assume
  # that it's a git commit hash and use a timestamp as the x-value.
  try:
    revision = int(data['rev'])
    if revision and 300000 < revision < 1000000:
      # Assume that revision is the commit position number for the master
      # branch in the chromium/src repo.
      revision_supplemental_columns[prefix + 'commit_pos'] = revision
  except ValueError:
    # The dashboard requires ordered integer revision numbers. If the revision
    # is not an integer, assume it's a git hash and send a timestamp.
    revision = _GetTimestamp()
    revision_supplemental_columns[prefix + 'chromium'] = data['rev']

  # An explicit data['point_id'] overrides the default behavior.
  if 'point_id' in data:
    revision = int(data['point_id'])

  # For other revision data, add it if it's present and not undefined:
  for key in ['webrtc_git', 'v8_rev']:
    if key in data and data[key] != 'undefined':
      revision_supplemental_columns[prefix + key] = data[key]

  # If possible, also send the git hash.
  if 'git_revision' in data and data['git_revision'] != 'undefined':
    revision_supplemental_columns[prefix + 'chromium'] = data['git_revision']

  return revision, revision_supplemental_columns


def _TestPath(test_name, chart_name, trace_name):
  """Gets the slash-separated test path to send.

  Args:
    test_name: Test name. Typically, this will be a top-level 'test suite' name.
    chart_name: Name of a chart where multiple trace lines are grouped. If the
      chart name is the same as the trace name, that signifies that this is
      the main trace for the chart.
    trace_name: The "trace name" is the name of an individual line on a chart.

  Returns:
    A slash-separated list of names that corresponds to the hierarchy of test
    data in the Chrome Performance Dashboard; doesn't include master or bot
    name.
  """
  # For tests run on reference builds by builds/scripts/slave/telemetry.py,
  # "_ref" is appended to the trace name. On the dashboard, as long as the
  # result is on the right chart, it can just be called "ref".
  if trace_name == chart_name + '_ref':
    trace_name = 'ref'
  chart_name = chart_name.replace('_by_url', '')

  # No slashes are allowed in the trace name.
  trace_name = trace_name.replace('/', '_')

  # The results for "test/chart" and "test/chart/*" will all be shown on the
  # same chart by the dashboard. The result with path "test/chart" is
  # considered the main trace for the chart.
  test_path = '%s/%s/%s' % (test_name, chart_name, trace_name)
  if chart_name == trace_name:
    test_path = '%s/%s' % (test_name, chart_name)
  return test_path


def _SendResultsJson(url, results_json, token_generator_callback):
  """Makes an HTTP POST with the given JSON to the Performance Dashboard.

  Args:
    url: URL of the Performance Dashboard instance, e.g.
      "https://chromeperf.appspot.com".
    results_json: JSON string that contains the data to be sent.

  Returns:
    None if successful, or an error string if there were errors.
  """
  # When data is provided to urllib2.Request, a POST is sent instead of GET.
  # The data must be in the application/x-www-form-urlencoded format.
  data = six.moves.urllib.parse.urlencode({'data': results_json})
  req = six.moves.urllib.request.Request(url + SEND_RESULTS_PATH, data)
  try:
    oauth_token = token_generator_callback()
    req.headers['Authorization'] = 'Bearer %s' % oauth_token

    six.moves.urllib.request.urlopen(req, timeout=60 * 5)
  except (six.moves.urllib.error.HTTPError, six.moves.urllib.error.URLError,
          httplib.HTTPException):
    error = traceback.format_exc()

    if 'HTTPError: 400' in error:
      # If the remote app rejects the JSON, it's probably malformed,
      # so we don't want to retry it.
      raise SendResultsFatalException('Discarding JSON, error:\n%s' % error)
    raise SendResultsRetryException(error)


def _SendHistogramJson(url, histogramset_json, token_generator_callback):
  """POSTs a HistogramSet JSON to the Performance Dashboard.

  Args:
    url: URL of the Performance Dashboard instance, e.g.
      "https://chromeperf.appspot.com".
    histogramset_json: JSON string that contains a serialized HistogramSet.

  For |token_generator_callback|, see SendResults's documentation.

  Returns:
    None if successful, or an error string if there were errors.
  """
  try:
    oauth_token = token_generator_callback()

    data = zlib.compress(histogramset_json)
    headers = {'Authorization': 'Bearer %s' % oauth_token, 'User-Agent': 'perf-uploader/1.0'}

    http = httplib2.Http()

    response, _ = http.request(
        url + SEND_HISTOGRAMS_PATH, method='POST', body=data, headers=headers)

    # A 500 is presented on an exception on the dashboard side: timeout,
    # exception, etc. The dashboard can also send back 400 and 403; we could
    # recover from 403 (auth error), but 400 is generally malformed data.
    if response.status in (403, 500):
      raise SendResultsRetryException('HTTP Response %d: %s' %
                                      (response.status, response.reason))
    elif response.status != 200:
      raise SendResultsFatalException('HTTP Response %d: %s' %
                                      (response.status, response.reason))
  except httplib.ResponseNotReady:
    raise SendResultsRetryException(traceback.format_exc())
  except httplib2.HttpLib2Error:
    raise SendResultsRetryException(traceback.format_exc())
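For context, results_dashboard.py is normally driven by upload_results_to_perf_dashboard.py (next file), but a minimal sketch of calling SendResults directly might look like the following. The payload shape and the token callback are illustrative assumptions, not values from this commit; in production the default LuciAuthTokenGeneratorCallback is used.

# Usage sketch only; all values below are placeholders.
from core import results_dashboard

def _fake_token_generator():
  # Stand-in for `luci-auth token`; real callers rely on the default callback.
  return 'fake-oauth-token-for-illustration'

chart_point = {
    'master': 'ChromiumPerf',        # perf dashboard machine group
    'bot': 'linux-release',
    'test': 'sunspider/Total',
    'revision': 860000,
    'value': 123.4,
    'error': 1.2,
    'supplemental_columns': {},
}

ok = results_dashboard.SendResults(
    [chart_point],
    data_label='sunspider',
    url='https://chromeperf.appspot.com',
    send_as_histograms=False,
    token_generator_callback=_fake_token_generator,
    num_retries=2)
logging.info('upload succeeded: %s', ok)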
237
tools/perf/core/upload_results_to_perf_dashboard.py
Executable file
@@ -0,0 +1,237 @@
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This file was copy-pasted over from:
# //build/scripts/slave/upload_perf_dashboard_results.py
# with sections copied from:
# //build/scripts/slave/slave_utils.py

import json
import optparse
import os
import re
import shutil
import sys
import tempfile
import time
import logging
import six.moves.urllib.parse  # pylint: disable=import-error

from core import results_dashboard

logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
    ' %(module)s.%(funcName)s:%(lineno)d %(message)s')

RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s'


def _CommitPositionNumber(commit_pos):
  """Extracts the number part of a commit position.

  This is used to extract the number from got_revision_cp; it will be used
  as the value of "rev" in the data passed to results_dashboard.SendResults.
  """
  return int(re.search(r'{#(\d+)}', commit_pos).group(1))


def _GetDashboardJson(options):
  main_revision = _CommitPositionNumber(options.got_revision_cp)
  revisions = _GetPerfDashboardRevisionsWithProperties(options.got_webrtc_revision,
                                                       options.got_v8_revision,
                                                       options.git_revision, main_revision)
  reference_build = 'reference' in options.name
  stripped_test_name = options.name.replace('.reference', '')
  results = {}
  logging.info('Opening results file %s' % options.results_file)
  with open(options.results_file) as f:
    results = json.load(f)
  dashboard_json = {}
  if 'charts' not in results:
    # These are legacy results.
    # pylint: disable=redefined-variable-type
    dashboard_json = results_dashboard.MakeListOfPoints(
        results,
        options.configuration_name,
        stripped_test_name,
        options.project,
        options.buildbucket,
        options.buildername,
        options.buildnumber, {},
        options.perf_dashboard_machine_group,
        revisions_dict=revisions)
  else:
    dashboard_json = results_dashboard.MakeDashboardJsonV1(
        results,
        revisions,
        stripped_test_name,
        options.configuration_name,
        options.project,
        options.buildbucket,
        options.buildername,
        options.buildnumber, {},
        reference_build,
        perf_dashboard_machine_group=options.perf_dashboard_machine_group)
  return dashboard_json


def _GetDashboardHistogramData(options):
  revisions = {
      '--chromium_commit_positions': _CommitPositionNumber(options.got_revision_cp),
      '--chromium_revisions': options.git_revision
  }

  if options.got_webrtc_revision:
    revisions['--webrtc_revisions'] = options.got_webrtc_revision
  if options.got_v8_revision:
    revisions['--v8_revisions'] = options.got_v8_revision

  is_reference_build = 'reference' in options.name
  stripped_test_name = options.name.replace('.reference', '')

  max_bytes = 1 << 20
  output_dir = tempfile.mkdtemp()

  try:
    begin_time = time.time()
    results_dashboard.MakeHistogramSetWithDiagnostics(
        histograms_file=options.results_file,
        test_name=stripped_test_name,
        bot=options.configuration_name,
        buildername=options.buildername,
        buildnumber=options.buildnumber,
        project=options.project,
        buildbucket=options.buildbucket,
        revisions_dict=revisions,
        is_reference_build=is_reference_build,
        perf_dashboard_machine_group=options.perf_dashboard_machine_group,
        output_dir=output_dir,
        max_bytes=max_bytes)
    end_time = time.time()
    logging.info('Duration of adding diagnostics for %s: %d seconds' %
                 (stripped_test_name, end_time - begin_time))

    # Read all batch files from output_dir.
    dashboard_jsons = []
    for basename in os.listdir(output_dir):
      with open(os.path.join(output_dir, basename)) as f:
        dashboard_jsons.append(json.load(f))

    return dashboard_jsons
  finally:
    shutil.rmtree(output_dir)


def _CreateParser():
  # Parse options.
  parser = optparse.OptionParser()
  parser.add_option('--name')
  parser.add_option('--results-file')
  parser.add_option('--output-json-file')
  parser.add_option('--got-revision-cp')
  parser.add_option('--configuration-name')
  parser.add_option('--results-url')
  parser.add_option('--perf-dashboard-machine-group')
  parser.add_option('--project')
  parser.add_option('--buildbucket')
  parser.add_option('--buildername')
  parser.add_option('--buildnumber')
  parser.add_option('--got-webrtc-revision')
  parser.add_option('--got-v8-revision')
  parser.add_option('--git-revision')
  parser.add_option('--output-json-dashboard-url')
  parser.add_option('--send-as-histograms', action='store_true')
  return parser


def main(args):
  parser = _CreateParser()
  options, extra_args = parser.parse_args(args)

  # Validate options.
  if extra_args:
    parser.error('Unexpected command line arguments')
  if not options.configuration_name or not options.results_url:
    parser.error('configuration_name and results_url are required.')

  if not options.perf_dashboard_machine_group:
    logging.error('Invalid perf dashboard machine group')
    return 1

  if not options.send_as_histograms:
    dashboard_json = _GetDashboardJson(options)
    dashboard_jsons = []
    if dashboard_json:
      dashboard_jsons.append(dashboard_json)
  else:
    dashboard_jsons = _GetDashboardHistogramData(options)

    # The HistogramSet might have been batched if it would be too large to
    # upload together. It's safe to concatenate the batches in order to write
    # output_json_file.
    # TODO(crbug.com/918208): Use a script in catapult to merge dashboard_jsons.
    dashboard_json = sum(dashboard_jsons, [])

  if options.output_json_file:
    json.dump(dashboard_json, options.output_json_file, indent=4, separators=(',', ': '))

  if dashboard_jsons:
    if options.output_json_dashboard_url:
      # Dump the dashboard url to a file.
      dashboard_url = GetDashboardUrl(options.name, options.configuration_name,
                                      options.results_url, options.got_revision_cp,
                                      options.perf_dashboard_machine_group)
      with open(options.output_json_dashboard_url, 'w') as f:
        json.dump(dashboard_url if dashboard_url else '', f)

    for batch in dashboard_jsons:
      if not results_dashboard.SendResults(
          batch,
          options.name,
          options.results_url,
          send_as_histograms=options.send_as_histograms):
        return 1
  else:
    # The upload didn't fail since there was no data to upload.
    logging.warning('No perf dashboard JSON was produced.')
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))


def GetDashboardUrl(name, configuration_name, results_url, got_revision_cp,
                    perf_dashboard_machine_group):
  """Returns a link to the dashboard for the uploaded results."""
  name = name.replace('.reference', '')
  dashboard_url = results_url + RESULTS_LINK_PATH % (
      six.moves.urllib.parse.quote(perf_dashboard_machine_group),
      six.moves.urllib.parse.quote(configuration_name), six.moves.urllib.parse.quote(name),
      _CommitPositionNumber(got_revision_cp))

  return dashboard_url


def _GetPerfDashboardRevisionsWithProperties(got_webrtc_revision,
                                             got_v8_revision,
                                             git_revision,
                                             main_revision,
                                             point_id=None):
  """Fills in the same revisions fields that process_log_utils does."""
  versions = {}
  versions['rev'] = main_revision
  versions['webrtc_git'] = got_webrtc_revision
  versions['v8_rev'] = got_v8_revision
  versions['git_revision'] = git_revision
  versions['point_id'] = point_id
  # There are a lot of "bad" revisions to check for, so clean them all up here.
  for key in versions.keys():
    if not versions[key] or versions[key] == 'undefined':
      del versions[key]
  return versions
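For reference, process_perf_results.py (below) drives this script by building an argument list and calling main() directly rather than spawning a subprocess. A hypothetical direct call could look like the following; every flag value here is a made-up placeholder, and the histogram path additionally assumes a Chromium/catapult checkout for add_reserved_diagnostics.

# Sketch only; mirrors the argument list built in _upload_perf_results().
from core import upload_results_to_perf_dashboard

args = [
    '--name', 'speedometer2',
    '--configuration-name', 'linux-perf',
    '--results-file', '/tmp/speedometer2/perf_results.json',
    '--results-url', 'https://chromeperf.appspot.com',
    '--got-revision-cp', 'refs/heads/master@{#860000}',
    '--got-v8-revision', 'deadbeef',
    '--got-webrtc-revision', 'deadbeef',
    '--output-json-file', '/tmp/upload_output.json',
    '--perf-dashboard-machine-group', 'ChromiumPerf',
    '--send-as-histograms',
]
return_code = upload_results_to_perf_dashboard.main(args)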
72
tools/perf/cross_device_test_config.py
Normal file
@@ -0,0 +1,72 @@
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Dictionary for the repeat config.
# E.g.:
# {
#   'builder-1':
#     {
#       'benchmark-1':
#         {
#           'story-1': 4,
#         }
#   'builder-2':
#     'benchmark-2':
#       {
#         'story-1': 10,
#         'story-2': 10,
#       }
# }

TARGET_DEVICES = {
    'android-pixel2-perf-fyi': {
        'speedometer2': {
            'Speedometer2': 3,
        },
        'rendering.mobile': {
            'css_transitions_triggered_style_element': 4,
            'canvas_animation_no_clear': 4
        },
        'system_health.common_mobile': 3,
        'system_health.memory_mobile': 3,
    },
    'android-pixel2-perf': {
        'system_health.common_mobile': {
            # timeToFirstContentfulPaint
            'browse:media:googleplaystore:2019': 10,
            'load:social:pinterest:2019': 10,
            'browse:media:facebook_photos:2019': 10
        }
    },
    'android-go-perf': {
        'system_health.common_mobile': {
            # timeToFirstContentfulPaint
            'background:social:facebook:2019': 10,
            # cputimeToFirstContentfulPaint
            'load:search:google:2018': 10
        }
    },
    'linux-perf': {
        'system_health.common_desktop': {
            # cputimeToFirstContentfulPaint
            'browse:social:tumblr_infinite_scroll:2018': 10,
            'long_running:tools:gmail-background': 10,
            'browse:media:youtubetv:2019': 10
        }
    },
    'win-10-perf': {
        'system_health.common_desktop': {
            # cputimeToFirstContentfulPaint
            'browse:media:tumblr:2018': 10,
            'browse:social:tumblr_infinite_scroll:2018': 10,
            'load:search:google:2018': 10,
        }
    },
    'linux-perf-calibration': {
        'speedometer2': {
            'Speedometer2': 28,
        },
        'blink_perf.shadow_dom': 31
    }
}
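Within this commit, process_perf_results.py only checks whether a builder name appears in TARGET_DEVICES (to flag cross-device merging). As a small illustration of the table's shape, a hypothetical helper, not part of this commit, could resolve a per-story repeat count like this:

# Hypothetical lookup helper; a benchmark entry is either an int that applies
# to every story or a story-name -> count dict.
import cross_device_test_config

def get_repeat_count(builder, benchmark, story, default=1):
  benchmarks = cross_device_test_config.TARGET_DEVICES.get(builder, {})
  repeat_config = benchmarks.get(benchmark, default)
  if isinstance(repeat_config, dict):
    return repeat_config.get(story, default)
  return repeat_config

assert get_repeat_count('linux-perf-calibration', 'speedometer2', 'Speedometer2') == 28
assert get_repeat_count('android-go-perf', 'system_health.common_mobile',
                        'load:search:google:2018') == 10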
670
tools/perf/process_perf_results.py
Executable file
@@ -0,0 +1,670 @@
#!/usr/bin/env vpython
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function

import argparse
import collections
import json
import logging
import multiprocessing
import os
import shutil
import sys
import tempfile
import time
import uuid

logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
    ' %(module)s.%(funcName)s:%(lineno)d %(message)s')

import cross_device_test_config

from core import path_util
from core import upload_results_to_perf_dashboard
from core import results_merger

path_util.AddAndroidPylibToPath()

try:
  from pylib.utils import logdog_helper
except ImportError:
  pass

RESULTS_URL = 'https://chromeperf.appspot.com'

# Until we are migrated to LUCI, we will be utilizing a hard-coded master
# name based on what is passed in via the build properties.
# See crbug.com/801289 for more details.
MACHINE_GROUP_JSON_FILE = os.path.join(path_util.GetChromiumSrcDir(), 'tools', 'perf', 'core',
                                       'perf_dashboard_machine_group_mapping.json')

JSON_CONTENT_TYPE = 'application/json'

# Cache of what data format (ChartJSON, Histograms, etc.) each results file is
# in so that only one disk read is required when checking the format multiple
# times.
_data_format_cache = {}
DATA_FORMAT_GTEST = 'gtest'
DATA_FORMAT_CHARTJSON = 'chartjson'
DATA_FORMAT_HISTOGRAMS = 'histograms'
DATA_FORMAT_UNKNOWN = 'unknown'


def _GetMachineGroup(build_properties):
  machine_group = None
  if build_properties.get('perf_dashboard_machine_group', False):
    # Once the LUCI migration is complete this will exist as a property
    # in the build properties.
    machine_group = build_properties['perf_dashboard_machine_group']
  else:
    builder_group_mapping = {}
    with open(MACHINE_GROUP_JSON_FILE) as fp:
      builder_group_mapping = json.load(fp)
    if build_properties.get('builder_group', False):
      legacy_builder_group = build_properties['builder_group']
    else:
      # TODO(crbug.com/1153958): remove reference to mastername.
      legacy_builder_group = build_properties['mastername']
    if builder_group_mapping.get(legacy_builder_group):
      machine_group = builder_group_mapping[legacy_builder_group]
  if not machine_group:
    raise ValueError('Must set perf_dashboard_machine_group or have a valid '
                     'mapping in '
                     'src/tools/perf/core/perf_dashboard_machine_group_mapping.json. '
                     'See bit.ly/perf-dashboard-machine-group for more details.')
  return machine_group


def _upload_perf_results(json_to_upload, name, configuration_name, build_properties,
                         output_json_file):
  """Uploads the contents of result JSON(s) to the perf dashboard."""
  args = [
      '--buildername', build_properties['buildername'], '--buildnumber',
      build_properties['buildnumber'], '--name', name, '--configuration-name',
      configuration_name, '--results-file', json_to_upload, '--results-url', RESULTS_URL,
      '--got-revision-cp', build_properties['got_revision_cp'], '--got-v8-revision',
      build_properties['got_v8_revision'], '--got-webrtc-revision',
      build_properties['got_webrtc_revision'], '--output-json-file', output_json_file,
      '--perf-dashboard-machine-group',
      _GetMachineGroup(build_properties)
  ]
  buildbucket = build_properties.get('buildbucket', {})
  if isinstance(buildbucket, basestring):
    buildbucket = json.loads(buildbucket)

  if 'build' in buildbucket:
    args += [
        '--project',
        buildbucket['build'].get('project'),
        '--buildbucket',
        buildbucket['build'].get('bucket'),
    ]

  if build_properties.get('git_revision'):
    args.append('--git-revision')
    args.append(build_properties['git_revision'])
  if _is_histogram(json_to_upload):
    args.append('--send-as-histograms')

  # TODO(crbug.com/1072729): log this in the top level.
  logging.info('upload_results_to_perf_dashboard: %s.' % args)

  return upload_results_to_perf_dashboard.main(args)


def _is_histogram(json_file):
  return _determine_data_format(json_file) == DATA_FORMAT_HISTOGRAMS


def _is_gtest(json_file):
  return _determine_data_format(json_file) == DATA_FORMAT_GTEST


def _determine_data_format(json_file):
  if json_file not in _data_format_cache:
    with open(json_file) as f:
      data = json.load(f)
      if isinstance(data, list):
        _data_format_cache[json_file] = DATA_FORMAT_HISTOGRAMS
      elif isinstance(data, dict):
        if 'charts' in data:
          _data_format_cache[json_file] = DATA_FORMAT_CHARTJSON
        else:
          _data_format_cache[json_file] = DATA_FORMAT_GTEST
      else:
        _data_format_cache[json_file] = DATA_FORMAT_UNKNOWN
      return _data_format_cache[json_file]
    _data_format_cache[json_file] = DATA_FORMAT_UNKNOWN
  return _data_format_cache[json_file]


def _merge_json_output(output_json, jsons_to_merge, extra_links, test_cross_device=False):
  """Merges the contents of one or more results JSONs.

  Args:
    output_json: A path to a JSON file to which the merged results should be
      written.
    jsons_to_merge: A list of JSON files that should be merged.
    extra_links: a (key, value) map in which keys are the human-readable strings
      which describe the data, and each value is a logdog url that contains the data.
  """
  begin_time = time.time()
  merged_results = results_merger.merge_test_results(jsons_to_merge, test_cross_device)

  # Only append the perf results links if present.
  if extra_links:
    merged_results['links'] = extra_links

  with open(output_json, 'w') as f:
    json.dump(merged_results, f)

  end_time = time.time()
  print_duration('Merging json test results', begin_time, end_time)
  return 0


def _handle_perf_json_test_results(benchmark_directory_map, test_results_list):
  """Checks the test_results.json under each folder:

  1. mark the benchmark 'enabled' if test results are found
  2. add the json content to a list for non-ref builds.
  """
  begin_time = time.time()
  benchmark_enabled_map = {}
  for benchmark_name, directories in benchmark_directory_map.items():
    for directory in directories:
      # Obtain the test name we are running.
      is_ref = '.reference' in benchmark_name
      enabled = True
      try:
        with open(os.path.join(directory, 'test_results.json')) as json_data:
          json_results = json.load(json_data)
          if not json_results:
            # Output is null, meaning the test didn't produce any results.
            # Want to output an error and continue loading the rest of the
            # test results.
            logging.warning('No results produced for %s, skipping upload' % directory)
            continue
          if json_results.get('version') == 3:
            # Non-telemetry tests don't have written json results, but
            # if they are executing then they are enabled and will generate
            # chartjson results.
            if not bool(json_results.get('tests')):
              enabled = False
          if not is_ref:
            # We don't need to upload reference build data to the
            # flakiness dashboard since we don't monitor the ref build.
            test_results_list.append(json_results)
      except IOError as e:
        # TODO(crbug.com/936602): Figure out how to surface these errors. Should
        # we have a non-zero exit code if we error out?
        logging.error('Failed to obtain test results for %s: %s', benchmark_name, e)
        continue
      if not enabled:
        # We don't upload disabled benchmarks or tests that are run
        # as a smoke test.
        logging.info('Benchmark %s ran no tests on at least one shard' % benchmark_name)
        continue
      benchmark_enabled_map[benchmark_name] = True

  end_time = time.time()
  print_duration('Analyzing perf json test results', begin_time, end_time)
  return benchmark_enabled_map


def _generate_unique_logdog_filename(name_prefix):
  return name_prefix + '_' + str(uuid.uuid4())


def _handle_perf_logs(benchmark_directory_map, extra_links):
  """Uploads benchmark logs to logdog and adds a page entry for them."""
  begin_time = time.time()
  benchmark_logs_links = collections.defaultdict(list)

  for benchmark_name, directories in benchmark_directory_map.items():
    for directory in directories:
      benchmark_log_file = os.path.join(directory, 'benchmark_log.txt')
      if os.path.exists(benchmark_log_file):
        with open(benchmark_log_file) as f:
          uploaded_link = logdog_helper.text(
              name=_generate_unique_logdog_filename(benchmark_name), data=f.read())
          benchmark_logs_links[benchmark_name].append(uploaded_link)

  logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
  logdog_stream = logdog_helper.text(
      logdog_file_name,
      json.dumps(benchmark_logs_links, sort_keys=True, indent=4, separators=(',', ': ')),
      content_type=JSON_CONTENT_TYPE)
  extra_links['Benchmarks logs'] = logdog_stream
  end_time = time.time()
  print_duration('Generating perf log streams', begin_time, end_time)


def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
  begin_time = time.time()
  with open(benchmarks_shard_map_file) as f:
    benchmarks_shard_data = f.read()
    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
    logdog_stream = logdog_helper.text(
        logdog_file_name, benchmarks_shard_data, content_type=JSON_CONTENT_TYPE)
    extra_links['Benchmarks shard map'] = logdog_stream
  end_time = time.time()
  print_duration('Generating benchmark shard map stream', begin_time, end_time)


def _get_benchmark_name(directory):
  return os.path.basename(directory).replace(" benchmark", "")


def _scan_output_dir(task_output_dir):
  benchmark_directory_map = {}
  benchmarks_shard_map_file = None

  directory_list = [
      f for f in os.listdir(task_output_dir)
      if not os.path.isfile(os.path.join(task_output_dir, f))
  ]
  benchmark_directory_list = []
  for directory in directory_list:
    for f in os.listdir(os.path.join(task_output_dir, directory)):
      path = os.path.join(task_output_dir, directory, f)
      if os.path.isdir(path):
        benchmark_directory_list.append(path)
      elif path.endswith('benchmarks_shard_map.json'):
        benchmarks_shard_map_file = path
  # Now create a map of benchmark name to the list of directories
  # the lists were written to.
  for directory in benchmark_directory_list:
    benchmark_name = _get_benchmark_name(directory)
    if benchmark_name in benchmark_directory_map.keys():
      benchmark_directory_map[benchmark_name].append(directory)
    else:
      benchmark_directory_map[benchmark_name] = [directory]

  return benchmark_directory_map, benchmarks_shard_map_file


def process_perf_results(output_json,
                         configuration_name,
                         build_properties,
                         task_output_dir,
                         smoke_test_mode,
                         output_results_dir,
                         lightweight=False,
                         skip_perf=False):
  """Processes perf results.

  Consists of merging the json-test-format output, uploading the perf test
  output (chartjson and histogram), and storing the benchmark logs in logdog.

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perftest-output.json file containing the performance results in histogram
  or dashboard json format and an output.json file containing the json test
  results for the benchmark.

  Returns:
    (return_code, upload_results_map):
      return_code is 0 if the whole operation is successful, non zero otherwise.
      benchmark_upload_result_map: the dictionary that describes which benchmarks
        were successfully uploaded.
  """
  handle_perf = not lightweight or not skip_perf
  handle_non_perf = not lightweight or skip_perf
  logging.info('lightweight mode: %r; handle_perf: %r; handle_non_perf: %r' %
               (lightweight, handle_perf, handle_non_perf))

  begin_time = time.time()
  return_code = 0
  benchmark_upload_result_map = {}

  benchmark_directory_map, benchmarks_shard_map_file = _scan_output_dir(task_output_dir)

  test_results_list = []
  extra_links = {}

  if handle_non_perf:
    # First, upload benchmarks shard map to logdog and add a page
    # entry for it in extra_links.
    if benchmarks_shard_map_file:
      _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links)

    # Second, upload all the benchmark logs to logdog and add a page entry for
    # those links in extra_links.
    _handle_perf_logs(benchmark_directory_map, extra_links)

  # Then try to obtain the list of json test results to merge
  # and determine the status of each benchmark.
  benchmark_enabled_map = _handle_perf_json_test_results(benchmark_directory_map,
                                                         test_results_list)

  build_properties_map = json.loads(build_properties)
  if not configuration_name:
    # We are deprecating perf-id; see crbug.com/817823.
    configuration_name = build_properties_map['buildername']

  if not smoke_test_mode and handle_perf:
    try:
      return_code, benchmark_upload_result_map = _handle_perf_results(
          benchmark_enabled_map, benchmark_directory_map, configuration_name,
          build_properties_map, extra_links, output_results_dir)
    except Exception:
      logging.exception('Error handling perf results jsons')
      return_code = 1

  if handle_non_perf:
    # Finally, merge all test results json, add the extra links and write out to
    # the output location.
    try:
      _merge_json_output(output_json, test_results_list, extra_links,
                         configuration_name in cross_device_test_config.TARGET_DEVICES)
    except Exception:
      logging.exception('Error handling test results jsons.')

  end_time = time.time()
  print_duration('Total process_perf_results', begin_time, end_time)
  return return_code, benchmark_upload_result_map


def _merge_chartjson_results(chartjson_dicts):
  merged_results = chartjson_dicts[0]
  for chartjson_dict in chartjson_dicts[1:]:
    for key in chartjson_dict:
      if key == 'charts':
        for add_key in chartjson_dict[key]:
          merged_results[key][add_key] = chartjson_dict[key][add_key]
  return merged_results


def _merge_histogram_results(histogram_lists):
  merged_results = []
  for histogram_list in histogram_lists:
    merged_results += histogram_list

  return merged_results


def _merge_perf_results(benchmark_name, results_filename, directories):
  begin_time = time.time()
  collected_results = []
  for directory in directories:
    filename = os.path.join(directory, 'perf_results.json')
    try:
      with open(filename) as pf:
        collected_results.append(json.load(pf))
    except IOError as e:
      # TODO(crbug.com/936602): Figure out how to surface these errors. Should
      # we have a non-zero exit code if we error out?
      logging.error('Failed to obtain perf results from %s: %s', directory, e)
  if not collected_results:
    logging.error('Failed to obtain any perf results from %s.', benchmark_name)
    return

  # Assuming that multiple shards will only be chartjson or histogram set.
  # Non-telemetry benchmarks only ever run on one shard.
  merged_results = []
  if isinstance(collected_results[0], dict):
    merged_results = _merge_chartjson_results(collected_results)
  elif isinstance(collected_results[0], list):
    merged_results = _merge_histogram_results(collected_results)

  with open(results_filename, 'w') as rf:
    json.dump(merged_results, rf)

  end_time = time.time()
  print_duration(('%s results merging' % (benchmark_name)), begin_time, end_time)


def _upload_individual(benchmark_name, directories, configuration_name, build_properties,
                       output_json_file):
  tmpfile_dir = tempfile.mkdtemp()
  try:
    upload_begin_time = time.time()
    # There are potentially multiple directories with results; re-write and
    # merge them if necessary.
    results_filename = None
    if len(directories) > 1:
      merge_perf_dir = os.path.join(os.path.abspath(tmpfile_dir), benchmark_name)
      if not os.path.exists(merge_perf_dir):
        os.makedirs(merge_perf_dir)
      results_filename = os.path.join(merge_perf_dir, 'merged_perf_results.json')
      _merge_perf_results(benchmark_name, results_filename, directories)
    else:
      # It was only written to one shard, so use that shard's data.
      results_filename = os.path.join(directories[0], 'perf_results.json')

    results_size_in_mib = os.path.getsize(results_filename) / (2**20)
    logging.info('Uploading perf results from %s benchmark (size %s Mib)' %
                 (benchmark_name, results_size_in_mib))
    with open(output_json_file, 'w') as oj:
      upload_return_code = _upload_perf_results(results_filename, benchmark_name,
                                                configuration_name, build_properties, oj)
      upload_end_time = time.time()
      print_duration(('%s upload time' % (benchmark_name)), upload_begin_time,
                     upload_end_time)
      return (benchmark_name, upload_return_code == 0)
  finally:
    shutil.rmtree(tmpfile_dir)


def _upload_individual_benchmark(params):
  try:
    return _upload_individual(*params)
  except Exception:
    benchmark_name = params[0]
    upload_succeed = False
    logging.exception('Error uploading perf result of %s' % benchmark_name)
    return benchmark_name, upload_succeed


def _GetCpuCount(log=True):
  try:
    cpu_count = multiprocessing.cpu_count()
    if sys.platform == 'win32':
      # TODO(crbug.com/1190269) - we can't use more than 56
      # cores on Windows or Python3 may hang.
      cpu_count = min(cpu_count, 56)
    return cpu_count
  except NotImplementedError:
    if log:
      logging.warn('Failed to get a CPU count for this bot. See crbug.com/947035.')
    # TODO(crbug.com/948281): This is currently set to 4 since the mac masters
    # only have 4 cores. Once we move to all-linux, this can be increased or
    # we can even delete this whole function and use multiprocessing.cpu_count()
    # directly.
    return 4


def _handle_perf_results(benchmark_enabled_map, benchmark_directory_map, configuration_name,
                         build_properties, extra_links, output_results_dir):
  """
  Uploads perf results to the perf dashboard.

  This method also uploads the perf results to logdog and augments
  |extra_links| with the resulting links.

  Returns:
    (return_code, benchmark_upload_result_map)
      return_code is 0 if the upload to the perf dashboard succeeded, 1
        otherwise.
      benchmark_upload_result_map is a dictionary describing which benchmarks
        were successfully uploaded.
  """
  begin_time = time.time()
  # Upload all eligible benchmarks to the perf dashboard.
  results_dict = {}

  invocations = []
  for benchmark_name, directories in benchmark_directory_map.items():
    if not benchmark_enabled_map.get(benchmark_name, False):
      continue
    # Create a place to write the perf results that you will write out to
    # logdog.
    output_json_file = os.path.join(output_results_dir, (str(uuid.uuid4()) + benchmark_name))
    results_dict[benchmark_name] = output_json_file
    # TODO(crbug.com/1072729): pass final arguments instead of build properties
    # and configuration_name.
    invocations.append(
        (benchmark_name, directories, configuration_name, build_properties, output_json_file))

  # Kick off the uploads in multiple processes.
  # crbug.com/1035930: We are hitting HTTP Response 429. Limit ourselves
  # to 2 processes to avoid this error. Uncomment the following code once
  # the problem is fixed on the dashboard side.
  # pool = multiprocessing.Pool(_GetCpuCount())
  pool = multiprocessing.Pool(2)
  upload_result_timeout = False
  try:
    async_result = pool.map_async(_upload_individual_benchmark, invocations)
    # TODO(crbug.com/947035): What timeout is reasonable?
    results = async_result.get(timeout=4000)
  except multiprocessing.TimeoutError:
    upload_result_timeout = True
    logging.error('Timeout uploading benchmarks to perf dashboard in parallel')
    results = []
    for benchmark_name in benchmark_directory_map:
      results.append((benchmark_name, False))
  finally:
    pool.terminate()

  # Keep a mapping of benchmarks to their upload results.
  benchmark_upload_result_map = {}
  for r in results:
    benchmark_upload_result_map[r[0]] = r[1]

  logdog_dict = {}
  upload_failures_counter = 0
  logdog_stream = None
  logdog_label = 'Results Dashboard'
  for benchmark_name, output_file in results_dict.items():
    upload_succeed = benchmark_upload_result_map[benchmark_name]
    if not upload_succeed:
      upload_failures_counter += 1
    is_reference = '.reference' in benchmark_name
    _write_perf_data_to_logfile(
        benchmark_name,
        output_file,
        configuration_name,
        build_properties,
        logdog_dict,
        is_reference,
        upload_failure=not upload_succeed)

  logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
  logdog_stream = logdog_helper.text(
      logdog_file_name,
      json.dumps(dict(logdog_dict), sort_keys=True, indent=4, separators=(',', ': ')),
      content_type=JSON_CONTENT_TYPE)
  if upload_failures_counter > 0:
    logdog_label += (' %s merge script perf data upload failures' % upload_failures_counter)
  extra_links[logdog_label] = logdog_stream
  end_time = time.time()
  print_duration('Uploading results to perf dashboard', begin_time, end_time)
  if upload_result_timeout or upload_failures_counter > 0:
    return 1, benchmark_upload_result_map
  return 0, benchmark_upload_result_map


def _write_perf_data_to_logfile(benchmark_name, output_file, configuration_name, build_properties,
                                logdog_dict, is_ref, upload_failure):
  viewer_url = None
  # logdog file to write perf results to
  if os.path.exists(output_file):
    results = None
    with open(output_file) as f:
      try:
        results = json.load(f)
      except ValueError:
        logging.error('Error parsing perf results JSON for benchmark %s' % benchmark_name)
    if results:
      try:
        output_json_file = logdog_helper.open_text(benchmark_name)
        json.dump(results, output_json_file, indent=4, separators=(',', ': '))
      except ValueError as e:
        logging.error('ValueError: "%s" while dumping output to logdog' % e)
      finally:
        output_json_file.close()
      viewer_url = output_json_file.get_viewer_url()
  else:
    logging.warning("Perf results JSON file doesn't exist for benchmark %s" % benchmark_name)

  base_benchmark_name = benchmark_name.replace('.reference', '')

  if base_benchmark_name not in logdog_dict:
    logdog_dict[base_benchmark_name] = {}

  # Add links for the perf results and the dashboard url to
  # the logs section of buildbot.
  if is_ref:
    if viewer_url:
      logdog_dict[base_benchmark_name]['perf_results_ref'] = viewer_url
    if upload_failure:
      logdog_dict[base_benchmark_name]['ref_upload_failed'] = 'True'
  else:
    logdog_dict[base_benchmark_name]['dashboard_url'] = (
        upload_results_to_perf_dashboard.GetDashboardUrl(benchmark_name, configuration_name,
                                                         RESULTS_URL,
                                                         build_properties['got_revision_cp'],
                                                         _GetMachineGroup(build_properties)))
    if viewer_url:
      logdog_dict[base_benchmark_name]['perf_results'] = viewer_url
    if upload_failure:
      logdog_dict[base_benchmark_name]['upload_failed'] = 'True'


def print_duration(step, start, end):
  logging.info('Duration of %s: %d seconds' % (step, end - start))


def main():
  """See collect_task.collect_task for more on the merge script API."""
  logging.info(sys.argv)
  parser = argparse.ArgumentParser()
  # configuration-name (previously perf-id) is the name of the bot the tests run on.
  # For example, buildbot-test is the name of the android-go-perf bot.
  # configuration-name and results-url are set in the json file which is going
  # away: tools/perf/core/chromium.perf.fyi.extras.json
  parser.add_argument('--configuration-name', help=argparse.SUPPRESS)

  parser.add_argument('--build-properties', help=argparse.SUPPRESS)
  parser.add_argument('--summary-json', help=argparse.SUPPRESS)
  parser.add_argument('--task-output-dir', help=argparse.SUPPRESS)
  parser.add_argument('-o', '--output-json', required=True, help=argparse.SUPPRESS)
  parser.add_argument(
      '--skip-perf',
      action='store_true',
      help='In lightweight mode, using --skip-perf will skip the performance'
      ' data handling.')
  parser.add_argument(
      '--lightweight',
      action='store_true',
      help='Choose the lightweight mode in which the perf result handling'
      ' is performed on a separate VM.')
  parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
  parser.add_argument(
      '--smoke-test-mode',
      action='store_true',
      help='This test should be run in smoke test mode'
      ' meaning it does not upload to the perf dashboard')

  args = parser.parse_args()

  output_results_dir = tempfile.mkdtemp('outputresults')
  try:
    return_code, _ = process_perf_results(args.output_json, args.configuration_name,
                                          args.build_properties, args.task_output_dir,
                                          args.smoke_test_mode, output_results_dir,
                                          args.lightweight, args.skip_perf)
    return return_code
  finally:
    shutil.rmtree(output_results_dir)


if __name__ == '__main__':
  sys.exit(main())
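On the bots this script is invoked as the swarming merge script, but for a rough picture of its entry point, a hypothetical local call of process_perf_results() could look like the sketch below. Every path and property value is a placeholder; build properties are passed as a JSON string, as the recipe does, and smoke_test_mode=True keeps the sketch from attempting a real dashboard upload.

# Sketch only; assumes a swarming-style task output directory layout.
import json
import shutil
import tempfile

import process_perf_results

build_properties = json.dumps({
    'buildername': 'linux-perf',
    'buildnumber': 1234,
    'perf_dashboard_machine_group': 'ChromiumPerf',
    'got_revision_cp': 'refs/heads/master@{#860000}',
    'got_v8_revision': 'deadbeef',
    'got_webrtc_revision': 'deadbeef',
    'git_revision': 'deadbeef',
})

output_results_dir = tempfile.mkdtemp('outputresults')
try:
  return_code, upload_map = process_perf_results.process_perf_results(
      output_json='/tmp/merged_test_results.json',
      configuration_name='linux-perf',
      build_properties=build_properties,
      task_output_dir='/tmp/swarming_task_output',
      smoke_test_mode=True,  # skip the dashboard upload in this sketch
      output_results_dir=output_results_dir)
finally:
  shutil.rmtree(output_results_dir)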