# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=import-outside-toplevel

"""Result Reporter

The result reporter formats and prints test results.

----
Example output for a command that runs the following tests:
CtsAnimationTestCases:EvaluatorTest, HelloWorldTests, and WmTests

Running Tests ...

CtsAnimationTestCases
---------------------

android.animation.cts.EvaluatorTest.UnitTests (7 Tests)
[1/7] android.animation.cts.EvaluatorTest#testRectEvaluator: PASSED (153ms)
[2/7] android.animation.cts.EvaluatorTest#testIntArrayEvaluator: PASSED (0ms)
[3/7] android.animation.cts.EvaluatorTest#testIntEvaluator: PASSED (0ms)
[4/7] android.animation.cts.EvaluatorTest#testFloatArrayEvaluator: PASSED (1ms)
[5/7] android.animation.cts.EvaluatorTest#testPointFEvaluator: PASSED (1ms)
[6/7] android.animation.cts.EvaluatorTest#testArgbEvaluator: PASSED (0ms)
[7/7] android.animation.cts.EvaluatorTest#testFloatEvaluator: PASSED (1ms)

HelloWorldTests
---------------

android.test.example.helloworld.UnitTests(2 Tests)
[1/2] android.test.example.helloworld.HelloWorldTest#testHalloWelt: PASSED (0ms)
[2/2] android.test.example.helloworld.HelloWorldTest#testHelloWorld: PASSED
(1ms)

WmTests
-------

com.android.tradefed.targetprep.UnitTests (1 Test)
RUNNER ERROR: com.android.tradefed.targetprep.TargetSetupError:
Failed to install WmTests.apk on 127.0.0.1:54373. Reason:
    error message ...


Summary
-------
CtsAnimationTestCases: Passed: 7, Failed: 0
HelloWorldTests: Passed: 2, Failed: 0
WmTests: Passed: 0, Failed: 0 (Completed With ERRORS)

1 test failed
"""

from __future__ import print_function

from collections import OrderedDict
import logging
import os
import re
import zipfile

from atest import atest_configs
from atest import atest_utils as au
from atest import constants
from atest.atest_enum import ExitCode
from atest.test_runners import test_runner_base

UNSUPPORTED_FLAG = 'UNSUPPORTED_RUNNER'
FAILURE_FLAG = 'RUNNER_FAILURE'
BENCHMARK_ESSENTIAL_KEYS = {
    'repetition_index',
    'cpu_time',
    'name',
    'repetitions',
    'run_type',
    'threads',
    'time_unit',
    'iterations',
    'run_name',
    'real_time',
}
# TODO(b/146875480): handle the optional benchmark events
BENCHMARK_OPTIONAL_KEYS = {'bytes_per_second', 'label'}
BENCHMARK_EVENT_KEYS = BENCHMARK_ESSENTIAL_KEYS.union(BENCHMARK_OPTIONAL_KEYS)
# Benchmark keys whose values should be reduced to their integer part before
# being reported; currently empty.
INT_KEYS = {}
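# ITER_SUMMARY maps an iteration number to the list of per-group summary
# strings produced in that iteration; ITER_COUNTS accumulates per-group
# passed/failed/ignored/assumption_failed counts across iterations.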
ITER_SUMMARY = {}
ITER_COUNTS = {}

_TEST_LOG_PATH_PRINT_PREFIX = 'Test Logs have been saved in '


class PerfInfo:
  """Class for storing performance test results of a test run."""

  def __init__(self):
    """Initialize a new instance of PerfInfo class."""
    # perf_info: A list of benchmark_info dicts.
    self.perf_info = []

  def update_perf_info(self, test):
    """Update perf_info with the given result of a single test.

    Args:
        test: A TestResult namedtuple.
    """
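    # For benchmark tests, test.additional_info is expected to carry the
    # BENCHMARK_ESSENTIAL_KEYS defined above, roughly of the form
    # (illustrative values only):
    #   {'name': 'BM_MapWriteNewEntry', 'real_time': '1530',
    #    'time_unit': 'ns', 'cpu_time': '1522', 'iterations': '460517', ...}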
    all_additional_keys = set(test.additional_info.keys())
    # Skip results that are missing any of the essential benchmark keys.
    if not BENCHMARK_ESSENTIAL_KEYS.issubset(all_additional_keys):
      return
    benchmark_info = {}
    benchmark_info['test_name'] = test.test_name
    for key, data in test.additional_info.items():
      if key in INT_KEYS:
        data_to_int = data.split('.')[0]
        benchmark_info[key] = data_to_int
      elif key in BENCHMARK_EVENT_KEYS:
        benchmark_info[key] = data
    if benchmark_info:
      self.perf_info.append(benchmark_info)

  def print_perf_info(self):
    """Print a summary of the collected perf_info."""
    if not self.perf_info:
      return
    classify_perf_info, max_len = self._classify_perf_info()
    separator = '-' * au.get_terminal_size()[0]
    print(separator)
    print(
        '{:{name}}    {:^{real_time}}    {:^{cpu_time}}    '
        '{:>{iterations}}'.format(
            'Benchmark',
            'Time',
            'CPU',
            'Iteration',
            name=max_len['name'] + 3,
            real_time=max_len['real_time'] + max_len['time_unit'] + 1,
            cpu_time=max_len['cpu_time'] + max_len['time_unit'] + 1,
            iterations=max_len['iterations'],
        )
    )
    print(separator)
    for module_name, module_perf_info in classify_perf_info.items():
      print('{}:'.format(module_name))
      for benchmark_info in module_perf_info:
        # BpfBenchMark/MapWriteNewEntry/1    1530 ns     1522 ns   460517
        print(
            '  #{:{name}}    {:>{real_time}} {:{time_unit}}    '
            '{:>{cpu_time}} {:{time_unit}}    '
            '{:>{iterations}}'.format(
                benchmark_info['name'],
                benchmark_info['real_time'],
                benchmark_info['time_unit'],
                benchmark_info['cpu_time'],
                benchmark_info['time_unit'],
                benchmark_info['iterations'],
                name=max_len['name'],
                real_time=max_len['real_time'],
                time_unit=max_len['time_unit'],
                cpu_time=max_len['cpu_time'],
                iterations=max_len['iterations'],
            )
        )

  def _classify_perf_info(self):
    """Classify the perf_info by test module name.

    Returns:
        A tuple of (classified_perf_info, max_len), where
        classified_perf_info: A dict that groups the perf_info entries by
                              the module they belong to.
            e.g.
                { module_name_01: [perf_info of module_1],
                  module_name_02: [perf_info of module_2], ...}
        max_len: A dict which stores the max length of each event.
                 It contains the max string length of 'name', 'real_time',
                 'time_unit', 'cpu_time', 'iterations'.
            e.g.
                {name: 56, real_time: 9, time_unit: 2, cpu_time: 8,
                 iterations: 12}
    """
    module_categories = set()
    max_len = {}
    all_name = []
    all_real_time = []
    all_time_unit = []
    all_cpu_time = []
    all_iterations = ['Iteration']
    for benchmark_info in self.perf_info:
      module_categories.add(benchmark_info['test_name'].split('#')[0])
      all_name.append(benchmark_info['name'])
      all_real_time.append(benchmark_info['real_time'])
      all_time_unit.append(benchmark_info['time_unit'])
      all_cpu_time.append(benchmark_info['cpu_time'])
      all_iterations.append(benchmark_info['iterations'])
    classified_perf_info = {}
    for module_name in module_categories:
      module_perf_info = []
      for benchmark_info in self.perf_info:
        if benchmark_info['test_name'].split('#')[0] == module_name:
          module_perf_info.append(benchmark_info)
      classified_perf_info[module_name] = module_perf_info
    max_len = {
        'name': len(max(all_name, key=len)),
        'real_time': len(max(all_real_time, key=len)),
        'time_unit': len(max(all_time_unit, key=len)),
        'cpu_time': len(max(all_cpu_time, key=len)),
        'iterations': len(max(all_iterations, key=len)),
    }
    return classified_perf_info, max_len


class RunStat:
  """Class for storing stats of a test run."""

  def __init__(
      self, passed=0, failed=0, ignored=0, run_errors=False, assumption_failed=0
  ):
    """Initialize a new instance of RunStat class.

    Args:
        passed: Count of passing tests.
        failed: Count of failed tests.
        ignored: Count of ignored tests.
        run_errors: A boolean indicating whether there were runner errors.
        assumption_failed: Count of assumption failure tests.
    """
    # TODO(b/109822985): Track group and run estimated totals for updating
    # summary line
    self.passed = passed
    self.failed = failed
    self.ignored = ignored
    self.assumption_failed = assumption_failed
    self.perf_info = PerfInfo()
    # Run errors are not for particular tests, they are runner errors.
    self.run_errors = run_errors

  @property
  def total(self):
    """Total number of tests actually run. Accessed via self.total."""
    return self.passed + self.failed


class ResultReporter:
  """Result Reporter class.

  As each test is run, the test runner will call self.process_test_result()
  with a TestResult namedtuple that contains the following information:
  - runner_name:   Name of the test runner
  - group_name:    Name of the test group if any.
                   In Tradefed that's the Module name.
  - test_name:     Name of the test.
                   In Tradefed that's qualified.class#Method
  - status:        The strings FAILED or PASSED.
  - stacktrace:    The stacktrace if the test failed.
  - group_total:   The total tests scheduled to be run for a group.
                   In Tradefed this is provided when the Module starts.
  - runner_total:  The total tests scheduled to be run for the runner.
                   In Tradefed this is not available so is None.

  The Result Reporter will print the results of this test and then update
  its stats state.

  Test stats are stored in the following structure:
  - self.run_stats: A RunStat instance containing stats for the overall run.
                    This includes pass/fail counts across ALL test runners.

  - self.runners:  Of the form: {RunnerName: {GroupName: RunStat Instance}},
                   where {} is an ordered dict.

                   The stats instance contains stats for each test group.
                   If the runner doesn't support groups, then the group
                   name will be None.

  For example this could be a state of ResultReporter:

  run_stats: RunStat(passed:10, failed:5)
  runners: {'AtestTradefedTestRunner':
                          {'Module1': RunStat(passed:1, failed:1),
                           'Module2': RunStat(passed:0, failed:4)},
            'RobolectricTestRunner': {None: RunStat(passed:5, failed:0)},
            'VtsTradefedTestRunner': {'Module1': RunStat(passed:4, failed:0)}}
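
  A rough, illustrative usage flow (test_results below is a hypothetical
  iterable of TestResult namedtuples produced by a test runner):

    reporter = ResultReporter()
    reporter.print_starting_text()
    for test in test_results:
      reporter.process_test_result(test)
    exit_code = reporter.print_summary()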
  """

  def __init__(self, silent=False, collect_only=False, wait_for_debugger=False):
    """Init ResultReporter.

    Args:
        silent: A boolean of whether to suppress the printing of test results.
        collect_only: A boolean of whether this run only collects tests
            instead of executing them.
        wait_for_debugger: A boolean of whether the run waits for a debugger
            to be attached before the tests proceed.
    """
    self.run_stats = RunStat()
    self.runners = OrderedDict()
    self.failed_tests = []
    self.all_test_results = []
    self.pre_test = None
    self.log_path = None
    self.silent = silent
    self.rerun_options = ''
    self.collect_only = collect_only
    self.test_result_link = None
    self.device_count = 0
    self.wait_for_debugger = wait_for_debugger

  def get_test_results_by_runner(self, runner_name):
    """Return the list of TestResults reported by the given runner."""
    return [t for t in self.all_test_results if t.runner_name == runner_name]

  def process_test_result(self, test):
    """Given the results of a single test, update stats and print results.

    Args:
        test: A TestResult namedtuple.
    """
    if test.runner_name not in self.runners:
      self.runners[test.runner_name] = OrderedDict()
    assert self.runners[test.runner_name] != FAILURE_FLAG
    self.all_test_results.append(test)
    if test.group_name not in self.runners[test.runner_name]:
      self.runners[test.runner_name][test.group_name] = RunStat()
      self._print_group_title(test)
    self._update_stats(test, self.runners[test.runner_name][test.group_name])
    self._print_result(test)

  def runner_failure(self, runner_name, failure_msg):
    """Report a runner failure.

    Use this instead of process_test_result() when the runner fails
    separately from any particular test, e.g. during runner setup.

    Args:
        runner_name: A string of the name of the runner.
        failure_msg: A string of the failure message to pass to user.
    """
    self.runners[runner_name] = FAILURE_FLAG

    print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
    print(
        au.mark_red(
            'Runner encountered a critical failure. Skipping.\nFAILURE: %s'
            % failure_msg
        )
    )

  def register_unsupported_runner(self, runner_name):
    """Register an unsupported runner.

    Prints the following to the screen:

    RunnerName
    ----------
    This runner does not support normal results formatting.
    Below is the raw output of the test runner.

    RAW OUTPUT:
    <Raw Runner Output>

    Args:
       runner_name: A String of the test runner's name.
    """
    assert runner_name not in self.runners
    self.runners[runner_name] = UNSUPPORTED_FLAG
    print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
    print(
        'This runner does not support normal results formatting. Below '
        'is the raw output of the test runner.\n\nRAW OUTPUT:'
    )

  def print_starting_text(self):
    """Print starting text for running tests."""
    if self.wait_for_debugger:
      print(
          au.mark_red(
              '\nDebugging Tests [you may need to attach a debugger for the'
              ' process to continue...]'
          )
      )
    else:
      print(au.mark_cyan('\nRunning Tests...'))

  def set_current_iteration_summary(self, iteration_num: int) -> None:
    """Add the given iteration's current summary to the list of its existing summaries."""
    run_summary = []
    for runner_name, groups in self.runners.items():
      for group_name, stats in groups.items():
        name = group_name if group_name else runner_name
        summary = self.process_summary(name, stats)
        run_summary.append(summary)
    summary_list = ITER_SUMMARY.get(iteration_num, [])
    summary_list.extend(run_summary)
    ITER_SUMMARY[iteration_num] = summary_list

  def get_iterations_summary(self) -> str:
    """Return the combined summary of all the iterations."""
    total_summary = ''
    for key, value in ITER_COUNTS.items():
      total_summary += '%s: %s: %s, %s: %s, %s: %s, %s: %s\n' % (
          key,
          'Passed',
          value.get('passed', 0),
          'Failed',
          value.get('failed', 0),
          'Ignored',
          value.get('ignored', 0),
          'Assumption_failed',
          value.get('assumption_failed', 0),
      )
    return f"{au.delimiter('-', 7)}\nITERATIONS RESULT\n{total_summary}"

  # pylint: disable=too-many-branches
  def print_summary(self):
    """Print summary of all test runs.

    Returns:
        0 if all tests pass, non-zero otherwise.
    """
    if self.collect_only:
      return self.print_collect_tests()
    tests_ret = ExitCode.SUCCESS
    if not self.runners:
      return tests_ret
    if not self.device_count:
      device_detail = ''
    elif self.device_count == 1:
      device_detail = '(Test executed with 1 device.)'
    else:
      device_detail = f'(Test executed with {self.device_count} devices.)'
    print('\n{}'.format(au.mark_cyan(f'Summary {device_detail}')))
    print(au.delimiter('-', 7))

    multi_iterations = len(ITER_SUMMARY) > 1
    for iter_num, summary_list in ITER_SUMMARY.items():
      if multi_iterations:
        print(au.mark_blue('ITERATION %s' % (int(iter_num) + 1)))
      for summary in summary_list:
        print(summary)
    if multi_iterations:
      print(self.get_iterations_summary())

    failed_sum = len(self.failed_tests)
    for runner_name, groups in self.runners.items():
      if groups == UNSUPPORTED_FLAG:
        print(
            f'Pretty output does not support {runner_name}. '
            r'See raw output above.'
        )
        continue
      if groups == FAILURE_FLAG:
        tests_ret = ExitCode.TEST_FAILURE
        print(runner_name, 'Crashed. No results to report.')
        failed_sum += 1
        continue
      for group_name, stats in groups.items():
        name = group_name if group_name else runner_name
        summary = self.process_summary(name, stats)
        if stats.failed > 0 or stats.run_errors:
          tests_ret = ExitCode.TEST_FAILURE
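          # A group whose runner errored without reporting failed tests still
          # counts as one failure in the final tally below.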
          if stats.run_errors:
            failed_sum += 1 if not stats.failed else 0
        if not ITER_SUMMARY:
          print(summary)

    self.run_stats.perf_info.print_perf_info()
    print()
    if UNSUPPORTED_FLAG not in self.runners.values():
      if tests_ret == ExitCode.SUCCESS:
        print(au.mark_green('All tests passed!'))
      else:
        message = '%d %s failed' % (
            failed_sum,
            'tests' if failed_sum > 1 else 'test',
        )
        print(au.mark_red(message))
        print('-' * len(message))
        self.print_failed_tests()
    if self.log_path:
      # Print aggregate result if any.
      self._print_aggregate_test_metrics()
      print(f'{_TEST_LOG_PATH_PRINT_PREFIX}{self.log_path}')
    # TODO(b/174535786) Error handling while uploading test results has
    # unexpected exceptions.
    # TODO (b/174627499) Saving this information in atest history.
    if self.test_result_link:
      print('Test Result uploaded to %s' % au.mark_green(self.test_result_link))
    return tests_ret

  def _print_aggregate_test_metrics(self):
    """Print aggregate test metrics text content if metric files exist."""
    metric_files = au.find_files(
        self.log_path, file_name='*_aggregate_test_metrics_*.txt'
    )

    if metric_files:
      print('\n{}'.format(au.mark_cyan('Aggregate test metrics')))
      print(au.delimiter('-', 7))
      for metric_file in metric_files:
        self._print_test_metric(metric_file)

  def _print_test_metric(self, metric_file):
    """Print the content of the input metric file."""
    test_metrics_re = re.compile(
        r'test_results.*\s(.*)_aggregate_test_metrics_.*\.txt'
    )
    if not os.path.isfile(metric_file):
      return
    matches = re.findall(test_metrics_re, metric_file)
    test_name = matches[0] if matches else ''
    if test_name:
      print('{}:'.format(au.mark_cyan(test_name)))
      with open(metric_file, 'r', encoding='utf-8') as f:
        matched = False
        filter_res = atest_configs.GLOBAL_ARGS.aggregate_metric_filter
        logging.debug('Aggregate metric filters: %s', filter_res)
        test_methods = []
        # Collect all test methods
        if filter_res:
          test_re = re.compile(r'\n\n(\S+)\n\n', re.MULTILINE)
          test_methods = re.findall(test_re, f.read())
          f.seek(0)
          # The first line of the file is also a test method but cannot be
          # parsed by test_re; add the first line manually.
          first_line = f.readline()
          test_methods.insert(0, str(first_line).strip())
          f.seek(0)
        for line in f.readlines():
          stripped_line = str(line).strip()
          if filter_res:
            if stripped_line in test_methods:
              print()
              au.colorful_print(' ' * 4 + stripped_line, constants.MAGENTA)
            for filter_re in filter_res:
              if re.match(re.compile(filter_re), line):
                matched = True
                print(' ' * 4 + stripped_line)
          else:
            matched = True
            print(' ' * 4 + stripped_line)
        if not matched:
          au.colorful_print(
              '  Warning: Nothing returned by the pattern: {}'.format(
                  filter_res
              ),
              constants.RED,
          )
        print()

  def print_collect_tests(self):
    """Print a summary of the collected tests only.

    Returns:
        0 if test collection completed.
    """
    tests_ret = ExitCode.SUCCESS
    if not self.runners:
      return tests_ret
    print(f'\n{au.mark_cyan("Summary: " + constants.COLLECT_TESTS_ONLY)}')
    print(au.delimiter('-', 26))
    for runner_name, groups in self.runners.items():
      for group_name, _ in groups.items():
        name = group_name if group_name else runner_name
        print(name)
    print()
    if self.log_path:
      print(f'{_TEST_LOG_PATH_PRINT_PREFIX}{self.log_path}')
    return ExitCode.SUCCESS

  def print_failed_tests(self):
    """Print the failed tests, if any."""
    if self.failed_tests:
      for test_name in self.failed_tests:
        print(test_name)

  def process_summary(self, name, stats):
    """Process the summary line.

    Strategy:
        Error status happens ->
            SomeTests: Passed: 2, Failed: 0 <red>(Completed With ERRORS)</red>
            SomeTests: Passed: 2, <red>Failed</red>: 2 <red>(Completed With
            ERRORS)</red>
        More than 1 test fails ->
            SomeTests: Passed: 2, <red>Failed</red>: 5
        No test fails ->
            SomeTests: <green>Passed</green>: 2, Failed: 0

    Args:
        name: A string of test name.
        stats: A RunStat instance for a test group.

    Returns:
        A summary of the test result.
    """
    passed_label = 'Passed'
    failed_label = 'Failed'
    ignored_label = 'Ignored'
    assumption_failed_label = 'Assumption Failed'
    error_label = ''
    host_log_content = ''
    if stats.failed > 0:
      failed_label = au.mark_red(failed_label)
    if stats.run_errors:
      error_label = au.mark_red('(Completed With ERRORS)')
      # Only extract host_log_content if test name is tradefed
      # Import here to prevent circular-import error.
      from atest.test_runners import atest_tf_test_runner

      if name == atest_tf_test_runner.AtestTradefedTestRunner.NAME:
        find_logs = au.find_files(
            self.log_path, file_name=constants.TF_HOST_LOG
        )
        if find_logs:
          host_log_content = au.mark_red('\n\nTradefederation host log:\n')
        for tf_log in find_logs:
          if zipfile.is_zipfile(tf_log):
            host_log_content = host_log_content + au.extract_zip_text(tf_log)
          else:
            with open(tf_log, 'r', encoding='utf-8') as f:
              for line in f.readlines():
                host_log_content = host_log_content + line

      # Print the content for the standard error file for a single module.
      if name and self.log_path and len(str(name).split()) > 1:
        log_name = str(name).split()[1] + '-stderr_*.txt'
        module_logs = au.find_files(self.log_path, file_name=log_name)
        for log_file in module_logs:
          print(
              ' ' * 2
              + au.mark_magenta(f'Logs in {os.path.basename(log_file)}:')
          )
          with open(log_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
              print(' ' * 2 + str(line), end='')
    elif stats.failed == 0:
      passed_label = au.mark_green(passed_label)
    temp = ITER_COUNTS.get(name, {})
    temp['passed'] = temp.get('passed', 0) + stats.passed
    temp['failed'] = temp.get('failed', 0) + stats.failed
    temp['ignored'] = temp.get('ignored', 0) + stats.ignored
    temp['assumption_failed'] = (
        temp.get('assumption_failed', 0) + stats.assumption_failed
    )
    ITER_COUNTS[name] = temp

    summary = '%s: %s: %s, %s: %s, %s: %s, %s: %s %s %s' % (
        name,
        passed_label,
        stats.passed,
        failed_label,
        stats.failed,
        ignored_label,
        stats.ignored,
        assumption_failed_label,
        stats.assumption_failed,
        error_label,
        host_log_content,
    )
    return summary

  def _update_stats(self, test, group):
    """Given the results of a single test, update test run stats.

    Args:
        test: a TestResult namedtuple.
        group: a RunStat instance for a test group.
    """
    # TODO(109822985): Track group and run estimated totals for updating
    # summary line
    if test.status == test_runner_base.PASSED_STATUS:
      self.run_stats.passed += 1
      group.passed += 1
    elif test.status == test_runner_base.IGNORED_STATUS:
      self.run_stats.ignored += 1
      group.ignored += 1
    elif test.status == test_runner_base.ASSUMPTION_FAILED:
      self.run_stats.assumption_failed += 1
      group.assumption_failed += 1
    elif test.status == test_runner_base.FAILED_STATUS:
      self.run_stats.failed += 1
      self.failed_tests.append(test.test_name)
      group.failed += 1
    elif test.status == test_runner_base.ERROR_STATUS:
      self.run_stats.run_errors = True
      group.run_errors = True
    self.run_stats.perf_info.update_perf_info(test)

  def _print_group_title(self, test):
    """Print the title line for a test group.

    Test Group/Runner Name
    ----------------------

    Args:
        test: A TestResult namedtuple.
    """
    if self.silent:
      return
    title = test.group_name or test.runner_name
    underline = '-' * (len(title))
    print('\n%s\n%s' % (title, underline))

  # pylint: disable=too-many-branches
  def _print_result(self, test):
    """Print the results of a single test.

       Looks like:
       fully.qualified.class#TestMethod: PASSED/FAILED

    Args:
        test: a TestResult namedtuple.
    """
    if self.silent:
      return
    if not self.pre_test or (test.test_run_name != self.pre_test.test_run_name):
      print(
          '%s (%s %s)'
          % (
              au.mark_blue(test.test_run_name),
              test.group_total,
              'Test' if test.group_total == 1 else 'Tests',
          )
      )
    if test.status == test_runner_base.ERROR_STATUS:
      print('RUNNER ERROR: %s\n' % test.details)
      self.pre_test = test
      return
    if test.test_name:
      color = ''
      if test.status == test_runner_base.PASSED_STATUS:
        # Example of output:
        # [78/92] test_name: PASSED (92ms)
        color = constants.GREEN
      elif test.status in (
          test_runner_base.IGNORED_STATUS,
          test_runner_base.ASSUMPTION_FAILED,
      ):
        # Example: [33/92] test_name: IGNORED (12ms)
        # Example: [33/92] test_name: ASSUMPTION_FAILED (12ms)
        color = constants.MAGENTA
      else:
        # Example: [26/92] test_name: FAILED (32ms)
        color = constants.RED
      print(
          '[{}/{}] {}'.format(
              test.test_count, test.group_total, test.test_name
          ),
          end='',
      )
      if self.collect_only:
        print()
      else:
        print(': {} {}'.format(au.colorize(test.status, color), test.test_time))
      if test.status == test_runner_base.PASSED_STATUS:
        for key, data in sorted(test.additional_info.items()):
          if key not in BENCHMARK_EVENT_KEYS:
            print(f'\t{au.mark_blue(key)}: {data}')
      if test.status == test_runner_base.FAILED_STATUS:
        print(f'\nSTACKTRACE:\n{test.details}')
    self.pre_test = test
