mirror of
https://github.com/godotengine/godot-angle-static.git
synced 2026-01-04 22:09:59 +03:00
Add tools/perf essential Python files.
While waiting for the full mirror we can simply duplicate the minimal
necessary files for angle_perftests to run.

Test: mb.py zip
Bug: angleproject:5114
Change-Id: I1847909cb78b32efed26a284fdfcd40ed31d7b4b
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2862922
Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
Commit-Queue: Jamie Madill <jmadill@chromium.org>
3
tools/perf/core/__init__.py
Normal file
@@ -0,0 +1,3 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
90
tools/perf/core/path_util.py
Normal file
@@ -0,0 +1,90 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import contextlib
import os
import sys


@contextlib.contextmanager
def SysPath(path, position=None):
  if position is None:
    sys.path.append(path)
  else:
    sys.path.insert(position, path)
  try:
    yield
  finally:
    if sys.path[-1] == path:
      sys.path.pop()
    else:
      sys.path.remove(path)

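# Usage sketch (hypothetical path, not part of this file): the context
# manager makes a directory importable only inside the with block, e.g.
#
#   with SysPath('/path/to/vendored/lib'):
#     import some_vendored_module  # assumed module name, for illustration
#
# and sys.path is restored on exit.
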
def GetChromiumSrcDir():
  return os.path.abspath(
      os.path.join(os.path.dirname(__file__), '..', '..', '..'))


def GetTelemetryDir():
  return os.path.join(GetChromiumSrcDir(), 'third_party', 'catapult',
                      'telemetry')


def GetTracingDir():
  return os.path.join(GetChromiumSrcDir(), 'third_party', 'catapult',
                      'tracing')


def GetPyUtilsDir():
  return os.path.join(GetChromiumSrcDir(), 'third_party', 'catapult',
                      'common', 'py_utils')


def GetPerfDir():
  return os.path.join(GetChromiumSrcDir(), 'tools', 'perf')


def GetPerfStorySetsDir():
  return os.path.join(GetPerfDir(), 'page_sets')


def GetOfficialBenchmarksDir():
  return os.path.join(GetPerfDir(), 'benchmarks')


def GetContribDir():
  return os.path.join(GetPerfDir(), 'contrib')


def GetAndroidPylibDir():
  return os.path.join(GetChromiumSrcDir(), 'build', 'android')


def GetVariationsDir():
  return os.path.join(GetChromiumSrcDir(), 'tools', 'variations')


def AddTelemetryToPath():
  telemetry_path = GetTelemetryDir()
  if telemetry_path not in sys.path:
    sys.path.insert(1, telemetry_path)


def AddTracingToPath():
  tracing_path = GetTracingDir()
  if tracing_path not in sys.path:
    sys.path.insert(1, tracing_path)


def AddPyUtilsToPath():
  py_utils_dir = GetPyUtilsDir()
  if py_utils_dir not in sys.path:
    sys.path.insert(1, py_utils_dir)


def AddAndroidPylibToPath():
  android_pylib_path = GetAndroidPylibDir()
  if android_pylib_path not in sys.path:
    sys.path.insert(1, android_pylib_path)


def GetExpectationsPath():
  return os.path.join(GetPerfDir(), 'expectations.config')
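In practice these helpers run before any Telemetry import; a minimal sketch
of the intended call pattern (assumed usage, not part of this commit):

    from core import path_util
    path_util.AddTelemetryToPath()
    from telemetry import benchmark  # resolves once Telemetry is on sys.path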
349
tools/perf/core/results_merger.py
Normal file
@@ -0,0 +1,349 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This file was copy-pasted over from:
# //build/scripts/slave/recipe_modules/swarming/resources/results_merger.py

# This file is responsible for merging JSON test results in both the
# simplified JSON format and the Chromium JSON test results format version 3.

from __future__ import print_function

import copy
import json
import sys

# These fields must appear in the test result output
REQUIRED = {
    'interrupted',
    'num_failures_by_type',
    'seconds_since_epoch',
    'tests',
}

# These fields are optional, but must have the same value on all shards
OPTIONAL_MATCHING = (
    'builder_name',
    'build_number',
    'chromium_revision',
    'has_pretty_patch',
    'has_wdiff',
    'path_delimiter',
    'pixel_tests_enabled',
    'random_order_seed',
)

OPTIONAL_IGNORED = ('layout_tests_dir',)

# These fields are optional and will be summed together
OPTIONAL_COUNTS = (
    'fixable',
    'num_flaky',
    'num_passes',
    'num_regressions',
    'skipped',
    'skips',
)


class MergeException(Exception):
  pass

def merge_test_results(shard_results_list, test_cross_device=False):
  """Merge a list of results.

  Args:
    shard_results_list: list of results to merge. All the results must have
      the same format. Supported formats are the simplified JSON format and
      the Chromium JSON test results format version 3 (see
      https://www.chromium.org/developers/the-json-test-results-format).
    test_cross_device: If true, some tests are running on multiple shards.
      This requires some extra handling when merging the values under
      'tests'.

  Returns:
    A dictionary that represents the merged results. Its format follows the
    same format as all results in |shard_results_list|.
  """
  shard_results_list = [x for x in shard_results_list if x]
  if not shard_results_list:
    return {}

  if 'seconds_since_epoch' in shard_results_list[0]:
    return _merge_json_test_result_format(shard_results_list,
                                          test_cross_device)
  else:
    return _merge_simplified_json_format(shard_results_list)

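# A quick sketch in the simplified format (hypothetical shard data):
#
#   shard_a = {'successes': ['test_one'], 'failures': [], 'valid': True}
#   shard_b = {'successes': [], 'failures': ['test_two'], 'valid': True}
#   merge_test_results([shard_a, shard_b])
#   => {'successes': ['test_one'], 'failures': ['test_two'], 'valid': True}
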
def _merge_simplified_json_format(shard_results_list):
  # This code is specialized to the "simplified" JSON format that used to be
  # the standard for recipes.

  # These are the only keys we pay attention to in the output JSON.
  merged_results = {
      'successes': [],
      'failures': [],
      'valid': True,
  }

  for result_json in shard_results_list:
    successes = result_json.get('successes', [])
    failures = result_json.get('failures', [])
    valid = result_json.get('valid', True)

    if (not isinstance(successes, list) or not isinstance(failures, list)
        or not isinstance(valid, bool)):
      raise MergeException(  # pragma: no cover
          'Unexpected value type in %s' % result_json)

    merged_results['successes'].extend(successes)
    merged_results['failures'].extend(failures)
    merged_results['valid'] = merged_results['valid'] and valid
  return merged_results

def _merge_json_test_result_format(shard_results_list,
                                   test_cross_device=False):
  # This code is specialized to the Chromium JSON test results format
  # version 3:
  # https://www.chromium.org/developers/the-json-test-results-format

  # These are required fields for the JSON test result format version 3.
  merged_results = {
      'tests': {},
      'interrupted': False,
      'version': 3,
      'seconds_since_epoch': float('inf'),
      'num_failures_by_type': {},
  }

  # Make sure that we don't mutate the existing shard_results_list.
  shard_results_list = copy.deepcopy(shard_results_list)
  for result_json in shard_results_list:
    # TODO(tansell): check whether this deepcopy is actually necessary.
    result_json = copy.deepcopy(result_json)

    # Check the version first.
    version = result_json.pop('version', -1)
    if version != 3:
      raise MergeException(  # pragma: no cover (covered by
          # results_merger_unittest).
          'Unsupported version %s. Only version 3 is supported' % version)

    # Check that the results for each shard have the required keys.
    missing = REQUIRED - set(result_json)
    if missing:
      raise MergeException(  # pragma: no cover (covered by
          # results_merger_unittest).
          'Invalid json test results (missing %s)' % missing)

    # Curry merge_value for this result_json.
    # pylint: disable=cell-var-from-loop
    merge = lambda key, merge_func: merge_value(result_json, merged_results,
                                                key, merge_func)

    if test_cross_device:
      # Results from the same test (story) may be found on different
      # shards (devices). We need to handle the merging at the story level.
      merge('tests', merge_tries_v2)
    else:
      # Traverse the result_json's test trie and merged_results's test trie
      # in DFS order, adding the nodes to merged_results['tests'].
      merge('tests', merge_tries)

    # If any were interrupted, we are interrupted.
    merge('interrupted', lambda x, y: x | y)

    # Use the earliest seconds_since_epoch value.
    merge('seconds_since_epoch', min)

    # Sum the number of failures of each type.
    merge('num_failures_by_type', sum_dicts)

    # Optional values must match.
    for optional_key in OPTIONAL_MATCHING:
      if optional_key not in result_json:
        continue

      if optional_key not in merged_results:
        # Set this value to None, then blindly copy over it.
        merged_results[optional_key] = None
        merge(optional_key, lambda src, dst: src)
      else:
        merge(optional_key, ensure_match)

    # Optional values that are ignored.
    for optional_key in OPTIONAL_IGNORED:
      if optional_key in result_json:  # pragma: no cover (covered by
        # results_merger_unittest).
        merged_results[optional_key] = result_json.pop(optional_key)

    # Sum optional count values.
    for count_key in OPTIONAL_COUNTS:
      if count_key in result_json:  # pragma: no cover
        # TODO(mcgreevy): add coverage.
        merged_results.setdefault(count_key, 0)
        merge(count_key, lambda a, b: a + b)

    if result_json:
      raise MergeException(  # pragma: no cover (covered by
          # results_merger_unittest).
          'Unmergable values %s' % list(result_json.keys()))

  return merged_results

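# A sketch of merging two version-3 shards (hypothetical data):
#
#   shard_0 = {'version': 3, 'interrupted': False,
#              'seconds_since_epoch': 1000.0,
#              'num_failures_by_type': {'PASS': 1},
#              'tests': {'suite': {'a': {'actual': 'PASS',
#                                        'expected': 'PASS'}}}}
#   shard_1 = the same shape with test 'b' and seconds_since_epoch 1005.0
#   merge_test_results([shard_0, shard_1])
#   => 'tests'/'suite' contains both 'a' and 'b', 'seconds_since_epoch' is
#      1000.0, and 'num_failures_by_type' sums to {'PASS': 2}.
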
def merge_tries(source, dest):
  """Merges test tries.

  This is intended for use as a merge_func parameter to merge_value.

  Args:
    source: A result json test trie.
    dest: A json test trie merge destination.
  """
  # merge_tries merges source into dest by performing a lock-step
  # depth-first traversal of dest and source.
  # pending_nodes contains a list of all sub-tries which have been reached
  # but need further merging.
  # Each element consists of a trie prefix, and a sub-trie from each of dest
  # and source which is reached via that prefix.
  pending_nodes = [('', dest, source)]
  while pending_nodes:
    prefix, dest_node, curr_node = pending_nodes.pop()
    for k, v in curr_node.items():
      if k in dest_node:
        if not isinstance(v, dict):
          raise MergeException(
              '%s:%s: %r not mergable, curr_node: %r\ndest_node: %r' %
              (prefix, k, v, curr_node, dest_node))
        pending_nodes.append(('%s:%s' % (prefix, k), dest_node[k], v))
      else:
        dest_node[k] = v
  return dest

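# For instance (hypothetical tries):
#
#   dest = {'suite': {'test_a': {'actual': 'PASS', 'expected': 'PASS'}}}
#   src = {'suite': {'test_b': {'actual': 'FAIL', 'expected': 'PASS'}}}
#   merge_tries(src, dest)
#   => dest holds both test_a and test_b under 'suite'.
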
def merge_tries_v2(source, dest):
  """Merges test tries, with support for merging results for the same story
  from different devices, which is not supported in v1.

  This is intended for use as a merge_func parameter to merge_value.

  Args:
    source: A result json test trie.
    dest: A json test trie merge destination.
  """
  # merge_tries_v2 merges source into dest by performing a lock-step
  # depth-first traversal of dest and source.
  # pending_nodes contains a list of all sub-tries which have been reached
  # but need further merging.
  # Each element consists of a trie prefix, and a sub-trie from each of dest
  # and source which is reached via that prefix.
  pending_nodes = [('', dest, source)]
  while pending_nodes:
    prefix, dest_node, curr_node = pending_nodes.pop()
    for k, v in curr_node.items():
      if k in dest_node:
        if not isinstance(v, dict):
          raise MergeException(
              '%s:%s: %r not mergable, curr_node: %r\ndest_node: %r' %
              (prefix, k, v, curr_node, dest_node))
        elif 'actual' in v and 'expected' in v:
          # v is the test result of a story name which is already in dest.
          _merging_cross_device_results(v, dest_node[k])
        else:
          pending_nodes.append(('%s:%s' % (prefix, k), dest_node[k], v))
      else:
        dest_node[k] = v
  return dest

def _merging_cross_device_results(src, dest):
  # 1. Merge the 'actual' field and update is_unexpected based on the new
  #    values.
  dest['actual'] += ' %s' % src['actual']
  if any(actual != dest['expected'] for actual in dest['actual'].split()):
    dest['is_unexpected'] = True
  # 2. Append each item under 'artifacts' and 'times'.
  if 'artifacts' in src:
    if 'artifacts' in dest:
      for artifact, artifact_list in src['artifacts'].items():
        if artifact in dest['artifacts']:
          dest['artifacts'][artifact] += artifact_list
        else:
          dest['artifacts'][artifact] = artifact_list
    else:
      dest['artifacts'] = src['artifacts']
  if 'times' in src:
    if 'times' in dest:
      dest['times'] += src['times']
    else:
      dest['time'] = src['time']
      dest['times'] = src['times']
  # 3. Remove 'shard' because the results now come from multiple shards.
  if 'shard' in dest:
    del dest['shard']

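# A sketch of the cross-device story merge (hypothetical entries):
#
#   dest = {'actual': 'PASS', 'expected': 'PASS', 'times': [1.2], 'shard': 0}
#   src = {'actual': 'FAIL', 'expected': 'PASS', 'times': [3.4]}
#   _merging_cross_device_results(src, dest)
#   => dest == {'actual': 'PASS FAIL', 'expected': 'PASS',
#               'times': [1.2, 3.4], 'is_unexpected': True}
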
def ensure_match(source, dest):
  """Returns source if it matches dest.

  This is intended for use as a merge_func parameter to merge_value.

  Raises:
    MergeException if source != dest.
  """
  if source != dest:
    raise MergeException(  # pragma: no cover (covered by
        # results_merger_unittest).
        "Values don't match: %s, %s" % (source, dest))
  return source

def sum_dicts(source, dest):
  """Adds values from source to corresponding values in dest.

  This is intended for use as a merge_func parameter to merge_value.
  """
  for k, v in source.items():
    dest.setdefault(k, 0)
    dest[k] += v

  return dest

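# For example, sum_dicts({'PASS': 2, 'FAIL': 1}, {'PASS': 3}) returns
# {'PASS': 5, 'FAIL': 1}.
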
def merge_value(source, dest, key, merge_func):
  """Merges a value from source to dest.

  The value is deleted from source.

  Args:
    source: A dictionary from which to pull a value, identified by key.
    dest: The dictionary into which the value is to be merged.
    key: The key which identifies the value to be merged.
    merge_func(src, dst): A function which merges its src into dst, and
      returns the result. May modify dst. May raise a MergeException.

  Raises:
    MergeException if the values can not be merged.
  """
  try:
    dest[key] = merge_func(source[key], dest[key])
  except MergeException as e:
    e.message = 'MergeFailure for %s\n%s' % (key, e.message)
    e.args = tuple([e.message] + list(e.args[1:]))
    raise
  del source[key]

def main(files):
  if len(files) < 2:
    sys.stderr.write('Not enough JSON files to merge.\n')
    return 1
  sys.stderr.write('Starting with %s\n' % files[0])
  result = json.load(open(files[0]))
  for f in files[1:]:
    sys.stderr.write('Merging %s\n' % f)
    result = merge_test_results([result, json.load(open(f))])
  print(json.dumps(result))
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
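Merged output is printed to stdout, so a typical invocation (hypothetical
file names, for illustration) looks like:

    vpython results_merger.py shard0.json shard1.json > merged.json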
263
tools/perf/generate_legacy_perf_dashboard_json.py
Executable file
@@ -0,0 +1,263 @@
#!/usr/bin/env vpython
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates legacy perf dashboard json from non-telemetry based perf tests.

Taken from chromium/build/scripts/slave/performance_log_processor.py
(https://goo.gl/03SQRk)
"""

import collections
import json
import logging
import math
import re

class LegacyResultsProcessor(object):
  """Class for any log processor expecting standard data to be graphed.

  The log will be parsed looking for any lines of the forms:
    <*>RESULT <graph_name>: <trace_name>= <value> <units>
  or
    <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...] <units>
  or
    <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>} <units>

  For example,
    *RESULT vm_final_browser: OneTab= 8488 kb
    RESULT startup: ref= [167.00,148.00,146.00,142.00] ms
    RESULT TabCapturePerformance_foo: Capture= {30.7, 1.45} ms

  The leading * is optional; it indicates that the data from that line should
  be considered "important", which may mean for example that it's graphed by
  default.

  If multiple values are given in [], their mean and (sample) standard
  deviation will be written; if only one value is given, that will be
  written. A trailing comma is permitted in the list of values.

  NOTE: All lines except for RESULT lines are ignored, including the Avg and
  Stddev lines output by Telemetry!

  Any of the <fields> except <value> may be empty, in which case the
  not-terribly-useful defaults will be used. The <graph_name> and
  <trace_name> should not contain any spaces, colons (:) nor equals-signs
  (=). Furthermore, the <trace_name> will be used on the waterfall display,
  so it should be kept short. If the trace_name ends with '_ref', it will be
  interpreted as a reference value, and shown alongside the corresponding
  main value on the waterfall.

  Semantic note: The terms graph and chart are used interchangeably here.
  """

  RESULTS_REGEX = re.compile(r'(?P<IMPORTANT>\*)?RESULT '
                             r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
                             r'(?P<VALUE>[\{\[]?[-\d\., ]+[\}\]]?)('
                             r' ?(?P<UNITS>.+))?')
  # TODO(eyaich): Determine if this format is still used by any perf tests.
  HISTOGRAM_REGEX = re.compile(r'(?P<IMPORTANT>\*)?HISTOGRAM '
                               r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
                               r'(?P<VALUE_JSON>{.*})(?P<UNITS>.+)?')

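  # Illustration (assumed match behavior) of what RESULTS_REGEX extracts
  # from a typical line:
  #
  #   '*RESULT startup: ref= [167.00,148.00] ms'
  #   => IMPORTANT = '*', GRAPH = 'startup', TRACE = 'ref',
  #      VALUE = '[167.00,148.00]', UNITS = 'ms'
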
  def __init__(self):
    # A dict of Graph objects, by name.
    self._graphs = {}
    # A dict mapping output file names to lists of lines in a file.
    self._output = {}
    self._percentiles = [.1, .25, .5, .75, .90, .95, .99]

  class Trace(object):
    """Encapsulates data for one trace. Here, this means one point."""

    def __init__(self):
      self.important = False
      self.values = []
      self.mean = 0.0
      self.stddev = 0.0

    def __str__(self):
      result = _FormatHumanReadable(self.mean)
      if self.stddev:
        result += '+/-%s' % _FormatHumanReadable(self.stddev)
      return result

  class Graph(object):
    """Encapsulates a set of points that should appear on the same graph."""

    def __init__(self):
      self.units = None
      self.traces = {}

    def IsImportant(self):
      """A graph is considered important if any of its traces is important."""
      for trace in self.traces.values():
        if trace.important:
          return True
      return False

    def BuildTracesDict(self):
      """Returns a dictionary mapping trace names to [value, stddev]."""
      traces_dict = {}
      for name, trace in self.traces.items():
        traces_dict[name] = [str(trace.mean), str(trace.stddev)]
      return traces_dict

  def GenerateJsonResults(self, filename):
    # Iterate through the file and process each output line.
    with open(filename) as f:
      for line in f.readlines():
        self.ProcessLine(line)
    # After all results have been seen, generate the graph json data.
    return self.GenerateGraphJson()

  def _PrependLog(self, filename, data):
    """Prepends some data to an output file."""
    self._output[filename] = data + self._output.get(filename, [])

  def ProcessLine(self, line):
    """Processes one result line, and updates the state accordingly."""
    results_match = self.RESULTS_REGEX.search(line)
    histogram_match = self.HISTOGRAM_REGEX.search(line)
    if results_match:
      self._ProcessResultLine(results_match)
    elif histogram_match:
      raise Exception('Error: Histogram results parsing not supported yet')

  def _ProcessResultLine(self, line_match):
    """Processes a line that matches the standard RESULT line format.

    Args:
      line_match: A MatchObject as returned by re.search.
    """
    match_dict = line_match.groupdict()
    graph_name = match_dict['GRAPH'].strip()
    trace_name = match_dict['TRACE'].strip()

    graph = self._graphs.get(graph_name, self.Graph())
    graph.units = (match_dict['UNITS'] or '').strip()
    trace = graph.traces.get(trace_name, self.Trace())
    value = match_dict['VALUE']
    trace.important = match_dict['IMPORTANT'] or False

    # Compute the mean and standard deviation for a list or a histogram,
    # or the numerical value of a scalar value.
    if value.startswith('['):
      try:
        value_list = [float(x) for x in value.strip('[],').split(',')]
      except ValueError:
        # Report, but ignore, corrupted data lines. (Lines that are so badly
        # broken that they don't even match the RESULTS_REGEX won't be
        # detected.)
        logging.warning("Bad test output: '%s'" % value.strip())
        return
      trace.values += value_list
      trace.mean, trace.stddev, filedata = self._CalculateStatistics(
          trace.values, trace_name)
      assert filedata is not None
      for filename in filedata:
        self._PrependLog(filename, filedata[filename])
    elif value.startswith('{'):
      stripped = value.strip('{},')
      try:
        trace.mean, trace.stddev = [float(x) for x in stripped.split(',')]
      except ValueError:
        logging.warning("Bad test output: '%s'" % value.strip())
        return
    else:
      try:
        trace.values.append(float(value))
        trace.mean, trace.stddev, filedata = self._CalculateStatistics(
            trace.values, trace_name)
        assert filedata is not None
        for filename in filedata:
          self._PrependLog(filename, filedata[filename])
      except ValueError:
        logging.warning("Bad test output: '%s'" % value.strip())
        return

    graph.traces[trace_name] = trace
    self._graphs[graph_name] = graph

  def GenerateGraphJson(self):
    """Writes graph json for each graph seen."""
    charts = {}
    for graph_name, graph in self._graphs.items():
      traces = graph.BuildTracesDict()

      # Traces should contain exactly two elements: [mean, stddev].
      for _, trace in traces.items():
        assert len(trace) == 2

      graph_dict = collections.OrderedDict([
          ('traces', traces),
          ('units', str(graph.units)),
      ])

      # Include a sorted list of important trace names if there are any.
      important = [t for t in graph.traces.keys()
                   if graph.traces[t].important]
      if important:
        graph_dict['important'] = sorted(important)

      charts[graph_name] = graph_dict
    return json.dumps(charts)

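  # Sketch of the expected output for a single important scalar line, e.g.
  #   *RESULT vm_final_browser: OneTab= 8488 kb
  # (assumed shape, traces rendered as [mean, stddev] strings):
  #   {"vm_final_browser": {"traces": {"OneTab": ["8488.0", "0.0"]},
  #                         "units": "kb", "important": ["OneTab"]}}
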
  # _CalculateStatistics needs to be a member function.
  # pylint: disable=R0201
  # Unused argument value_list.
  # pylint: disable=W0613
  def _CalculateStatistics(self, value_list, trace_name):
    """Returns a tuple with some statistics based on the given value list.

    This method may be overridden by subclasses wanting a different standard
    deviation calculation (or some other sort of error value entirely).

    Args:
      value_list: the list of values to use in the calculation
      trace_name: the trace that produced the data (not used in the base
          implementation, but subclasses may use it)

    Returns:
      A 3-tuple - mean, standard deviation, and a dict which is either
      empty or contains information about some file contents.
    """
    n = len(value_list)
    if n == 0:
      return 0.0, 0.0, {}
    mean = float(sum(value_list)) / n
    variance = sum([(element - mean)**2 for element in value_list]) / n
    stddev = math.sqrt(variance)

    return mean, stddev, {}

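  # Worked example: _CalculateStatistics([2.0, 4.0, 6.0], 'trace') returns
  # mean 4.0, stddev sqrt(8/3) ~= 1.633 (population standard deviation,
  # dividing by n), and an empty filedata dict.
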
def _FormatHumanReadable(number):
  """Formats a float into three significant figures, using metric suffixes.

  Only m, k, and M prefixes (for 1/1000, 1000, and 1,000,000) are used.
  Examples:
    0.0387    => 38.7m
    1.1234    => 1.12
    10866     => 10.8k
    682851200 => 683M
  """
  metric_prefixes = {-3: 'm', 0: '', 3: 'k', 6: 'M'}
  scientific = '%.2e' % float(number)  # 6.83e+005
  e_idx = scientific.find('e')  # 4, or 5 if negative
  digits = float(scientific[:e_idx])  # 6.83
  exponent = int(scientific[e_idx + 1:])  # int('+005') = 5
  while exponent % 3:
    digits *= 10
    exponent -= 1
  while exponent > 6:
    digits *= 10
    exponent -= 1
  while exponent < -3:
    digits /= 10
    exponent += 1
  if digits >= 100:
    # Don't append a meaningless '.0' to an integer number.
    digits = int(digits)  # pylint: disable=redefined-variable-type
  # Exponent is now divisible by 3, between -3 and 6 inclusive: (-3, 0, 3, 6).
  return '%s%s' % (digits, metric_prefixes[exponent])