1#!/usr/bin/env python3
2#
3# Copyright (C) 2017 The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9#      http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17"""Release test for simpleperf prebuilts.
18
It includes the following tests:
1. Test profiling Android apps on different Android versions (starting from Android N).
2. Test simpleperf python scripts on different hosts (linux, darwin and windows) on x86_64.
3. Test using both devices and emulators.
4. Test using both `adb root` and `adb unroot`.
24
25"""
26
import argparse
from dataclasses import dataclass
import fnmatch
import inspect
import multiprocessing as mp
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import time
import types
from typing import Dict, List, Optional, Tuple
import unittest

from tqdm import tqdm

from simpleperf_utils import BaseArgumentParser, extant_dir, log_exit, remove, is_darwin
44
45from . api_profiler_test import *
46from . annotate_test import *
47from . app_profiler_test import *
48from . app_test import *
49from . binary_cache_builder_test import *
50from . cpp_app_test import *
51from . debug_unwind_reporter_test import *
52from . gecko_profile_generator_test import *
53from . inferno_test import *
54from . java_app_test import *
55from . kotlin_app_test import *
56from . pprof_proto_generator_test import *
57from . purgatorio_test import *
58from . report_html_test import *
59from . report_lib_test import *
60from . report_sample_test import *
61from . run_simpleperf_on_device_test import *
62from . sample_filter_test import *
63from . stackcollapse_test import *
64from . tools_test import *
65from . test_utils import TestHelper
66
67
def get_args() -> argparse.Namespace:
    """Parse and return the command line arguments of the test runner."""
    arg_parser = BaseArgumentParser(description=__doc__)
    arg_parser.add_argument(
        '--browser', action='store_true', help='open report html file in browser.')
    arg_parser.add_argument(
        '-d', '--device', nargs='+',
        help='set devices used to run tests. Each device in format name:serial-number')
    arg_parser.add_argument('--only-host-test', action='store_true', help='Only run host tests')
    arg_parser.add_argument('--list-tests', action='store_true', help='List tests')
    arg_parser.add_argument('--ndk-path', type=extant_dir, help='Set the path of a ndk release')
    arg_parser.add_argument(
        '-p', '--pattern', nargs='+', help='Run tests matching the selected pattern.')
    arg_parser.add_argument('-r', '--repeat', type=int, default=1, help='times to repeat tests')
    arg_parser.add_argument('--test-from', help='Run tests following the selected test.')
    arg_parser.add_argument(
        '--test-dir', default='test_dir', help='Directory to store test results')
    return arg_parser.parse_args()
83
84
def get_all_tests() -> List[str]:
    """Collect 'TestCaseName.test_method' names for every unittest.TestCase
       subclass visible in this module's globals, sorted alphabetically.
    """
    def is_test_method(member) -> bool:
        # Both plain functions and bound methods count as test methods.
        return isinstance(member, (types.MethodType, types.FunctionType))

    tests = []
    for cls_name, cls in globals().items():
        if not (isinstance(cls, type) and issubclass(cls, unittest.TestCase)):
            continue
        tests.extend(
            cls_name + '.' + method_name
            for method_name, method in inspect.getmembers(cls)
            if is_test_method(method) and method_name.startswith('test'))
    return sorted(tests)
94
95
def get_host_tests() -> List[str]:
    """Return only the tests that run purely on the host (no device needed)."""
    return [test for test in get_all_tests() if get_test_type(test) == 'host_test']
100
101
def get_filtered_tests(
        tests: List[str],
        test_from: Optional[str],
        test_pattern: Optional[List[str]]) -> List[str]:
    """Narrow the test list.

    Args:
        tests: all candidate test names.
        test_from: if set, drop tests before this one; exits if it is unknown.
        test_pattern: if set, keep tests matching any glob pattern; exits if
            nothing matches.
    """
    if test_from:
        try:
            start_index = tests.index(test_from)
        except ValueError:
            log_exit("Can't find test %s" % test_from)
        else:
            tests = tests[start_index:]
    if test_pattern:
        compiled_patterns = [re.compile(fnmatch.translate(glob)) for glob in test_pattern]
        tests = [test for test in tests
                 if any(compiled.match(test) for compiled in compiled_patterns)]
        if not tests:
            log_exit('No tests are matched.')
    return tests
117
118
def get_test_type(test: str) -> Optional[str]:
    """Classify a test as 'device_test', 'device_serialized_test' or
       'host_test'; return None for an unknown test case name.
    """
    testcase_name, test_name = test.split('.')
    # This single test must not run in parallel with other device tests.
    if test_name == 'test_run_simpleperf_without_usb_connection':
        return 'device_serialized_test'
    device_testcases = {
        'TestApiProfiler', 'TestNativeProfiling', 'TestNativeLibDownloader',
        'TestRecordingRealApps', 'TestRunSimpleperfOnDevice'}
    host_testcases = {
        'TestAnnotate',
        'TestBinaryCacheBuilder',
        'TestDebugUnwindReporter',
        'TestInferno',
        'TestPprofProtoGenerator',
        'TestProtoFileReportLib',
        'TestPurgatorio',
        'TestReportHtml',
        'TestReportLib',
        'TestReportSample',
        'TestSampleFilter',
        'TestStackCollapse',
        'TestTools',
        'TestGeckoProfileGenerator'}
    if testcase_name in device_testcases or testcase_name.startswith('TestExample'):
        return 'device_test'
    if testcase_name in host_testcases:
        return 'host_test'
    return None
145
146
def build_testdata(testdata_dir: Path):
    """ Collect testdata in testdata_dir.
        In system/extras/simpleperf/scripts, testdata comes from:
            <script_dir>/../testdata, <script_dir>/test/script_testdata, <script_dir>/../demo
        In prebuilts/simpleperf, testdata comes from:
            <script_dir>/test/testdata
    """
    # Fails if testdata_dir already exists: the caller prepares a fresh dir.
    testdata_dir.mkdir()

    test_dir = Path(__file__).resolve().parent
    scripts_dir = test_dir.parent

    candidate_dirs = [
        test_dir / 'script_testdata',
        test_dir / 'testdata',
        scripts_dir.parent / 'testdata',
        scripts_dir.parent / 'demo',
        scripts_dir.parent / 'runtest',
    ]

    for candidate_dir in candidate_dirs:
        if not candidate_dir.is_dir():
            continue
        for entry in candidate_dir.iterdir():
            target = testdata_dir / entry.name
            # Earlier source dirs win: skip names already collected.
            if target.exists():
                continue
            if entry.is_file():
                shutil.copyfile(entry, target)
            elif entry.is_dir():
                shutil.copytree(entry, target)
178
179
def run_tests(tests: List[str]) -> bool:
    """Run the given tests in the current process via unittest.

    Test output goes to TestHelper.log_fh; returns True if all tests passed.
    """
    runner = unittest.TextTestRunner(stream=TestHelper.log_fh, verbosity=0)
    program = unittest.main(
        argv=[sys.argv[0]] + tests, testRunner=runner,
        exit=False, verbosity=0, module='test.do_test')
    return program.result.wasSuccessful()
186
187
def test_process_entry(tests: List[str], test_options: List[str], conn: mp.connection.Connection):
    """Entry point of a child test process.

    Parses the serialized test options, initializes TestHelper (which reports
    results back through `conn`), then runs the tests.
    """
    option_parser = argparse.ArgumentParser()
    option_parser.add_argument('--browser', action='store_true')
    option_parser.add_argument('--device', help='android device serial number')
    option_parser.add_argument('--ndk-path', type=extant_dir)
    option_parser.add_argument('--testdata-dir', type=extant_dir)
    option_parser.add_argument('--test-dir', help='directory to store test results')
    options = option_parser.parse_args(test_options)

    TestHelper.init(options.test_dir, options.testdata_dir,
                    options.browser, options.ndk_path, options.device, conn)
    run_tests(tests)
200
201
@dataclass
class Device:
    """An Android device used for testing: a human-readable name plus the
       adb serial number used to address it."""
    name: str
    serial_number: str
206
207
@dataclass
class TestResult:
    """Outcome of one test: the try on which it finished, its status
       ('OK'/'FAILED'), and its duration as a preformatted string."""
    try_time: int
    status: str
    duration: str

    def __str__(self) -> str:
        text = self.status
        if text == 'FAILED':
            text = f'{text} (at try_time {self.try_time})'
        return f'{text} {self.duration}'
220
221
class TestProcess:
    """ Create a test process to run selected tests on a device.

        The child process runs test_process_entry() and reports each test
        result back through a one-way pipe. If the child hangs (no message for
        TEST_TIMEOUT_IN_SEC) it is terminated; a dead child with unfinished
        tests can be restarted up to TEST_MAX_TRY_TIME times via restart().
    """

    TEST_MAX_TRY_TIME = 10
    TEST_TIMEOUT_IN_SEC = 10 * 60

    def __init__(
            self, test_type: str, tests: List[str],
            device: Optional[Device],
            repeat_index: int,
            test_options: List[str]):
        self.test_type = test_type
        self.tests = tests
        self.device = device
        self.repeat_index = repeat_index
        self.test_options = test_options
        # Current try, starting from 1. Used to name the per-try test dir.
        self.try_time = 1
        # Map from test name to its result, filled as messages arrive.
        self.test_results: Dict[str, TestResult] = {}
        self.parent_conn: Optional[mp.connection.Connection] = None
        self.proc: Optional[mp.Process] = None
        # Time of the last message from the child; used for hang detection.
        self.last_update_time = 0.0
        self._start_test_process()

    def _start_test_process(self):
        """ Spawn a child process running the tests without a result yet. """
        unfinished_tests = [test for test in self.tests if test not in self.test_results]
        self.parent_conn, child_conn = mp.Pipe(duplex=False)
        test_options = self.test_options[:]
        test_options += ['--test-dir', str(self.test_dir)]
        if self.device:
            test_options += ['--device', self.device.serial_number]
        self.proc = mp.Process(target=test_process_entry, args=(
            unfinished_tests, test_options, child_conn))
        self.proc.start()
        self.last_update_time = time.time()

    @property
    def name(self) -> str:
        """ Unique name of this test process (also used for progress bars). """
        name = self.test_type
        if self.device:
            name += '_' + self.device.name
        name += '_repeat_%d' % self.repeat_index
        return name

    @property
    def test_dir(self) -> Path:
        """ Directory to run the tests. """
        return Path.cwd() / (self.name + '_try_%d' % self.try_time)

    @property
    def alive(self) -> bool:
        """ Return if the test process is alive. """
        return self.proc.is_alive()

    @property
    def finished(self) -> bool:
        """ Return if all tests are finished. """
        return len(self.test_results) == len(self.tests)

    def check_update(self):
        """ Drain pending test results from the pipe; terminate the child if
            no message arrived within TEST_TIMEOUT_IN_SEC. """
        try:
            while self.parent_conn.poll():
                msg = self.parent_conn.recv()
                self._process_msg(msg)
                self.last_update_time = time.time()
        except (EOFError, BrokenPipeError):
            # The child may have exited; the caller notices via `alive`.
            pass
        if time.time() - self.last_update_time > TestProcess.TEST_TIMEOUT_IN_SEC:
            self.proc.terminate()

    def _process_msg(self, msg: str):
        """ Parse one 'test_name status duration' message from the child. """
        test_name, test_success, test_duration = msg.split()
        self.test_results[test_name] = TestResult(self.try_time, test_success, test_duration)

    def join(self):
        """ Wait for the child process to exit. """
        self.proc.join()

    def restart(self) -> bool:
        """ Create a new test process to run unfinished tests. """
        if self.finished:
            return False
        if self.try_time == self.TEST_MAX_TRY_TIME:
            # Exceeded max try time. So mark remaining tests as failed.
            for test in self.tests:
                if test not in self.test_results:
                    test_duration = '%.3fs' % (time.time() - self.last_update_time)
                    self.test_results[test] = TestResult(self.try_time, 'FAILED', test_duration)
            return False

        self.try_time += 1
        self._start_test_process()
        return True
314
315
class ProgressBar:
    """ Show one overall progress bar plus a bar per running test process. """

    def __init__(self, total_count: int):
        self.total_bar = tqdm(
            total=total_count, desc='test progress', ascii=' ##',
            bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}, {rate_fmt}", position=0)
        self.test_process_bars: Dict[str, tqdm] = {}

    def update(self, test_proc: TestProcess):
        """ Advance bars to match the finished-test count of test_proc. """
        bar = self.test_process_bars.get(test_proc.name)
        if bar is None:
            bar = tqdm(total=len(test_proc.tests),
                       desc=test_proc.name, ascii=' ##',
                       bar_format="{l_bar}{bar} | {n_fmt}/{total_fmt} [{elapsed}]")
            self.test_process_bars[test_proc.name] = bar

        newly_finished = len(test_proc.test_results) - bar.n
        if newly_finished:
            bar.update(newly_finished)
            self.total_bar.update(newly_finished)

    def end_test_proc(self, test_proc: TestProcess):
        """ Close and forget the bar of a finished test process. """
        bar = self.test_process_bars.pop(test_proc.name, None)
        if bar is not None:
            bar.close()

    def end_tests(self):
        """ Close any remaining per-process bars and the total bar. """
        for bar in self.test_process_bars.values():
            bar.close()
        self.total_bar.close()
346
347
class TestSummary:
    """ Track results per (test, environment) pair and persist them to
        test_summary.txt / failed_test_summary.txt in the current dir. """

    def __init__(
            self, devices: List[Device],
            device_tests: List[str],
            repeat_count: int, host_tests: List[str]):
        # None means "not finished yet"; such entries count as failed.
        self.results: Dict[Tuple[str, str], Optional[TestResult]] = {}
        for test in device_tests:
            for device in devices:
                for repeat_index in range(1, repeat_count + 1):
                    self.results[(test, '%s_repeat_%d' % (device.name, repeat_index))] = None
        for test in host_tests:
            self.results[(test, 'host')] = None
        self.write_summary()

    @property
    def test_count(self) -> int:
        return len(self.results)

    @property
    def failed_test_count(self) -> int:
        return sum(1 for result in self.results.values()
                   if result is None or result.status == 'FAILED')

    def update(self, test_proc: TestProcess):
        """ Merge results from a test process; rewrite summaries on change. """
        if test_proc.device:
            test_env = '%s_repeat_%d' % (test_proc.device.name, test_proc.repeat_index)
        else:
            test_env = 'host'

        has_update = False
        for test, result in test_proc.test_results.items():
            entry = (test, test_env)
            if self.results[entry] != result:
                self.results[entry] = result
                has_update = True
        if has_update:
            self.write_summary()

    def write_summary(self):
        """ Rewrite both summary files from scratch, sorted by test name. """
        with open('test_summary.txt', 'w') as fh, \
                open('failed_test_summary.txt', 'w') as failed_fh:
            for test_name, test_env in sorted(self.results.keys()):
                result = self.results[(test_name, test_env)]
                message = f'{test_name}    {test_env}    {result}'
                print(message, file=fh)
                if not result or result.status == 'FAILED':
                    print(message, file=failed_fh)
397                    print(message, file=failed_fh)
398
399
class TestManager:
    """ Create test processes, monitor their status and log test progresses. """

    def __init__(self, args: argparse.Namespace):
        self.repeat_count = args.repeat
        self.test_options = self._build_test_options(args)
        self.devices = self._build_test_devices(args)
        self.progress_bar: Optional[ProgressBar] = None
        self.test_summary: Optional[TestSummary] = None

    def _build_test_devices(self, args: argparse.Namespace) -> List[Device]:
        """ Parse --device values ('name:serial') into Device objects.
            Without --device, use one default device (adb picks the serial).
        """
        devices = []
        if args.device:
            for s in args.device:
                name, serial_number = s.split(':', 1)
                devices.append(Device(name, serial_number))
        else:
            devices.append(Device('default', ''))
        return devices

    def _build_test_options(self, args: argparse.Namespace) -> List[str]:
        """ Build the command line options forwarded to each test process. """
        test_options: List[str] = []
        if args.browser:
            test_options.append('--browser')
        if args.ndk_path:
            test_options += ['--ndk-path', args.ndk_path]
        # main() has already chdir'ed into the test dir and built ./testdata.
        testdata_dir = Path('testdata').resolve()
        test_options += ['--testdata-dir', str(testdata_dir)]
        return test_options

    def run_all_tests(self, tests: List[str]):
        """ Split tests by type and run each group with suitable parallelism. """
        device_tests = []
        device_serialized_tests = []
        host_tests = []
        for test in tests:
            test_type = get_test_type(test)
            assert test_type, f'No test type for test {test}'
            if test_type == 'device_test':
                device_tests.append(test)
            if test_type == 'device_serialized_test':
                device_serialized_tests.append(test)
            if test_type == 'host_test':
                host_tests.append(test)
        # Device tests repeat on every device; host tests run only once.
        total_test_count = (len(device_tests) + len(device_serialized_tests)
                            ) * len(self.devices) * self.repeat_count + len(host_tests)
        self.progress_bar = ProgressBar(total_test_count)
        self.test_summary = TestSummary(self.devices, device_tests + device_serialized_tests,
                                        self.repeat_count, host_tests)
        if device_tests:
            self.run_device_tests(device_tests)
        if device_serialized_tests:
            self.run_device_serialized_tests(device_serialized_tests)
        if host_tests:
            self.run_host_tests(host_tests)
        self.progress_bar.end_tests()
        self.progress_bar = None

    def run_device_tests(self, tests: List[str]):
        """ Tests can run in parallel on different devices. """
        test_procs: List[TestProcess] = []
        for device in self.devices:
            test_procs.append(TestProcess('device_test', tests, device, 1, self.test_options))
        self.wait_for_test_results(test_procs, self.repeat_count)

    def run_device_serialized_tests(self, tests: List[str]):
        """ Tests run on each device in order. """
        for device in self.devices:
            test_proc = TestProcess('device_serialized_test', tests, device, 1, self.test_options)
            self.wait_for_test_results([test_proc], self.repeat_count)

    def run_host_tests(self, tests: List[str]):
        """ Tests run only once on host. """
        test_proc = TestProcess('host_tests', tests, None, 1, self.test_options)
        self.wait_for_test_results([test_proc], 1)

    def wait_for_test_results(self, test_procs: List[TestProcess], repeat_count: int):
        """ Poll test processes until all are done.

            Dead processes with unfinished tests are restarted; a process that
            finished a round is replaced by one for the next repeat, until
            repeat_count rounds are done.
        """
        while test_procs:
            dead_procs: List[TestProcess] = []
            # Check update.
            for test_proc in test_procs:
                if not test_proc.alive:
                    dead_procs.append(test_proc)
                test_proc.check_update()
                self.progress_bar.update(test_proc)
                self.test_summary.update(test_proc)

            # Process dead procs.
            for test_proc in dead_procs:
                test_proc.join()
                if not test_proc.finished:
                    if test_proc.restart():
                        continue
                    else:
                        # Out of retries: record the remaining tests as failed.
                        self.progress_bar.update(test_proc)
                        self.test_summary.update(test_proc)
                self.progress_bar.end_test_proc(test_proc)
                test_procs.remove(test_proc)
                if test_proc.repeat_index < repeat_count:
                    test_procs.append(
                        TestProcess(test_proc.test_type, test_proc.tests, test_proc.device,
                                    test_proc.repeat_index + 1, test_proc.test_options))
            time.sleep(0.1)
        return True
504
505
def run_tests_in_child_process(tests: List[str], args: argparse.Namespace) -> bool:
    """ Run tests in child processes, read test results through a pipe.

        Returns True if every test passed.
    """
    mp.set_start_method('spawn')  # to be consistent on darwin, linux, windows
    manager = TestManager(args)
    manager.run_all_tests(tests)

    summary = manager.test_summary
    if summary.failed_test_count == 0:
        print('All tests passed!')
        return True
    print('%d of %d tests failed. See %s/failed_test_summary.txt for details.' %
          (summary.failed_test_count, summary.test_count, args.test_dir))
    return False
520
521
def main() -> bool:
    """ Entry point: select tests, prepare a clean test dir, run the tests. """
    args = get_args()
    if args.only_host_test:
        selected_tests = get_host_tests()
    else:
        selected_tests = get_all_tests()
    selected_tests = get_filtered_tests(selected_tests, args.test_from, args.pattern)

    if args.list_tests:
        print('\n'.join(selected_tests))
        return True

    test_dir = Path(args.test_dir).resolve()
    remove(test_dir)
    test_dir.mkdir(parents=True)
    # Switch to the test dir; results and testdata live underneath it.
    os.chdir(test_dir)
    build_testdata(Path('testdata'))
    return run_tests_in_child_process(selected_tests, args)
538