Bug 1472803 - Add dedicated parse method (draft)
author: Ionut Goldan <igoldan@mozilla.com>
date: Thu, 19 Jul 2018 14:17:31 +0300
changeset: 822397 ad4645f67539f33f86ef03f5b514480d46028e12
parent: 822396 ce52e9f4b14fe4d5cfc58759c1c1d718d660603f
child: 822398 d6bcc66ab9899f465ef5da3aa9bfd717687c1649
push id: 117352
push user: bmo:igoldan@mozilla.com
push date: Wed, 25 Jul 2018 06:16:19 +0000
bugs: 1472803
milestone: 63.0a1

Bug 1472803 - Add dedicated parse method

MozReview-Commit-ID: FEqq34xW02S
testing/raptor/raptor/output.py
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -86,16 +86,18 @@ class Output(object):
 
                     subtests.append(new_subtest)
 
             elif test.type == "benchmark":
                 if 'speedometer' in test.measurements:
                     subtests, vals = self.parseSpeedometerOutput(test)
                 elif 'motionmark' in test.measurements:
                     subtests, vals = self.parseMotionmarkOutput(test)
+                elif 'sunspider' in test.measurements:
+                    subtests, vals = self.parseSunspiderOutput(test)
                 elif 'webaudio' in test.measurements:
                     subtests, vals = self.parseWebaudioOutput(test)
                 suite['subtests'] = subtests
 
                 # if there is more than one subtest, calculate a summary result
                 if len(subtests) > 1:
                     suite['value'] = self.construct_summary(vals, testname=test.name)
 
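For context before the second hunk: the 'sunspider' measurements consumed by the new branch are a list with one entry per page cycle, and each entry's first element maps subtest names to lists of replicate times. A minimal sketch of that shape, assuming hypothetical data (the attribute names come from the patch; FakeTest and every concrete value below are invented for illustration):

    # Hypothetical stand-in for the raptor test object; only the attributes
    # that parseSunspiderOutput reads are modelled here.
    class FakeTest(object):
        name = 'raptor-sunspider-firefox'   # invented test name
        type = 'benchmark'
        unit = 'ms'
        alert_threshold = 2.0
        lower_is_better = True
        # one entry per page cycle; each wraps a {subtest: [replicates]} dict
        measurements = {'sunspider': [
            [{'3d-cube': [7.1, 6.9], 'bitops-and': [2.3, 2.4]}],
            [{'3d-cube': [7.0, 7.2], 'bitops-and': [2.2, 2.5]}],
        ]}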
@@ -245,16 +247,59 @@ class Output(object):
         names.sort(reverse=True)
         for name in names:
             _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
             vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
+    def parseSunspiderOutput(self, test):
+        _subtests = {}
+        data = test.measurements['sunspider']
+        for page_cycle in data:
+            for sub, replicates in page_cycle[0].iteritems():
+                # for each page cycle, build a list of subtests and append all related replicates
+                if sub not in _subtests:
+                    # subtest not seen yet (first page cycle), so add a new entry
+                    _subtests[sub] = {'unit': test.unit,
+                                      'alertThreshold': float(test.alert_threshold),
+                                      'lowerIsBetter': test.lower_is_better,
+                                      'name': sub,
+                                      'replicates': []}
+                _subtests[sub]['replicates'].extend([round(x, 3) for x in replicates])
+
+        # TODO: DRY object literal
+        total_subtest = {
+            'unit': test.unit,
+            'alertThreshold': float(test.alert_threshold),
+            'lowerIsBetter': test.lower_is_better,
+            'replicates': [],
+            'name': 'benchmark_score',
+            'value': 0
+        }
+        subtests = [total_subtest]
+        vals = []
+
+        names = _subtests.keys()
+        names.sort(reverse=True)
+        for name in names:
+            _subtests[name]['value'] = average = self._average(_subtests[name]['replicates'])
+            subtests.append(_subtests[name])
+            total_subtest['value'] += average
+
+            vals.append([_subtests[name]['value'], name])
+        vals.append([total_subtest['value'], total_subtest['name']])
+
+        return subtests, vals
+
+    @staticmethod
+    def _average(iterable):
+        return sum(iterable) / float(len(iterable))
+
     def output(self):
         """output to file and perfherder data json """
         if self.summarized_results == {}:
             LOG.error("error: no summarized raptor results found!")
             return False
 
         if os.environ['MOZ_UPLOAD_DIR']:
             # i.e. testing/mozharness/build/raptor.json locally; in production it will