1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <errno.h>
18 #include <fcntl.h>
19 #include <inttypes.h>
20 #include <poll.h>
21 #include <signal.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <unistd.h>
26 
27 #include <atomic>
28 #include <memory>
29 #include <string>
30 #include <tuple>
31 #include <vector>
32 
33 #include <gtest/gtest.h>
34 #if defined(__BIONIC__)
35 #include <sys/system_properties.h>
36 #endif
37 
38 #include "Color.h"
39 #include "Isolate.h"
40 #include "Log.h"
41 #include "NanoTime.h"
42 #include "Test.h"
43 
44 namespace android {
45 namespace gtest_extras {
46 
47 static std::atomic_int g_signal;
48 
SignalHandler(int sig)49 static void SignalHandler(int sig) {
50   g_signal = sig;
51 }
52 
RegisterSignalHandler()53 static void RegisterSignalHandler() {
54   auto ret = signal(SIGINT, SignalHandler);
55   if (ret == SIG_ERR) {
56     FATAL_PLOG("Setting up SIGINT handler failed");
57   }
58   ret = signal(SIGQUIT, SignalHandler);
59   if (ret == SIG_ERR) {
60     FATAL_PLOG("Setting up SIGQUIT handler failed");
61   }
62 }
63 
UnregisterSignalHandler()64 static void UnregisterSignalHandler() {
65   auto ret = signal(SIGINT, SIG_DFL);
66   if (ret == SIG_ERR) {
67     FATAL_PLOG("Disabling SIGINT handler failed");
68   }
69   ret = signal(SIGQUIT, SIG_DFL);
70   if (ret == SIG_ERR) {
71     FATAL_PLOG("Disabling SIGQUIT handler failed");
72   }
73 }
74 
// Build "<value><name>" appending a plural suffix ('s', or 'S' when
// uppercase is set) whenever value != 1, e.g. (2, " test") -> "2 tests".
static std::string PluralizeString(size_t value, const char* name, bool uppercase = false) {
  std::string result = std::to_string(value) + name;
  if (value != 1) {
    result += uppercase ? 'S' : 's';
  }
  return result;
}
86 
// Returns true if |str| begins with the gtest "DISABLED_" prefix.
inline static bool StartsWithDisabled(const std::string& str) {
  static constexpr char kPrefix[] = "DISABLED_";
  // rfind with pos 0 only matches at the very start of the string.
  return str.rfind(kPrefix, 0) == 0;
}
92 
// Run the test binary with --gtest_list_tests and parse its output to
// populate tests_ with (suite name, test name) tuples. Honors the
// --gtest_filter option, skips DISABLED_ suites/tests unless allowed,
// and applies round-robin sharding. Updates total_tests_,
// total_suites_, and total_disable_tests_.
void Isolate::EnumerateTests() {
  // Only apply --gtest_filter if present. This is the only option that changes
  // what tests are listed.
  std::string command(child_args_[0]);
  if (!options_.filter().empty()) {
    command += " --gtest_filter=" + options_.filter();
  }
  command += " --gtest_list_tests";
#if defined(__BIONIC__)
  // Only bionic is guaranteed to support the 'e' option.
  FILE* fp = popen(command.c_str(), "re");
#else
  FILE* fp = popen(command.c_str(), "r");
#endif
  if (fp == nullptr) {
    FATAL_PLOG("Unexpected failure from popen");
  }

  uint64_t total_shards = options_.total_shards();
  bool sharded = total_shards > 1;
  uint64_t test_count = 0;
  if (sharded) {
    // Start the countdown at shard_index + 1 so this shard takes every
    // total_shards-th test, offset by its shard index.
    test_count = options_.shard_index() + 1;
  }

  bool skip_until_next_suite = false;
  std::string suite_name;
  char* buffer = nullptr;
  size_t buffer_len = 0;
  bool new_suite = false;
  // getline returns -1 at EOF/error, ending the loop.
  while (getline(&buffer, &buffer_len, fp) > 0) {
    if (buffer[0] != ' ') {
      // This is the case name.
      suite_name = buffer;
      // Keep only the first token (anything after a space, such as a
      // "# TypeParam = ..." annotation, is dropped).
      auto space_index = suite_name.find(' ');
      if (space_index != std::string::npos) {
        suite_name.erase(space_index);
      }
      if (suite_name.back() == '\n') {
        suite_name.resize(suite_name.size() - 1);
      }

      if (!options_.allow_disabled_tests() && StartsWithDisabled(suite_name)) {
        // This whole set of tests have been disabled, skip them all.
        skip_until_next_suite = true;
      } else {
        new_suite = true;
        skip_until_next_suite = false;
      }
    } else if (buffer[0] == ' ' && buffer[1] == ' ') {
      // Two-space indent marks a test within the current suite.
      if (!skip_until_next_suite) {
        std::string test_name = &buffer[2];
        auto space_index = test_name.find(' ');
        if (space_index != std::string::npos) {
          test_name.erase(space_index);
        }
        if (test_name.back() == '\n') {
          test_name.resize(test_name.size() - 1);
        }
        if (options_.allow_disabled_tests() || !StartsWithDisabled(test_name)) {
          // When sharded, only every total_shards-th test is kept.
          if (!sharded || --test_count == 0) {
            tests_.push_back(std::make_tuple(suite_name, test_name));
            total_tests_++;
            if (new_suite) {
              // Only increment the number of suites when we find at least one test
              // for the suites.
              total_suites_++;
              new_suite = false;
            }
            if (sharded) {
              test_count = total_shards;
            }
          }
        } else {
          total_disable_tests_++;
        }
      } else {
        total_disable_tests_++;
      }
    } else {
      printf("Unexpected output from test listing.\nCommand:\n%s\nLine:\n%s\n", command.c_str(),
             buffer);
      exit(1);
    }
  }
  // getline allocates the buffer; it must be freed by the caller.
  free(buffer);
  if (pclose(fp) == -1) {
    FATAL_PLOG("Unexpected failure from pclose");
  }
}
183 
ChildProcessFn(const std::tuple<std::string,std::string> & test)184 int Isolate::ChildProcessFn(const std::tuple<std::string, std::string>& test) {
185   // Make sure the filter is only coming from our command-line option.
186   unsetenv("GTEST_FILTER");
187 
188   // Add the filter argument.
189   std::vector<char*> args(child_args_);
190   std::string filter("--gtest_filter=" + GetTestName(test));
191   args.push_back(filter.data());
192 
193   int argc = static_cast<int>(args.size());
194   // Add the null terminator.
195   args.push_back(nullptr);
196   ::testing::InitGoogleTest(&argc, args.data());
197   return RUN_ALL_TESTS();
198 }
199 
// Create a pipe with both ends marked close-on-exec. On success stores
// the read end in *read_fd and the write end in *write_fd and returns
// true; returns false (with no fds leaked) on failure.
static bool Pipe(int* read_fd, int* write_fd) {
  int fds[2];

#if defined(__linux__)
  // pipe2 sets O_CLOEXEC atomically.
  if (pipe2(fds, O_CLOEXEC) != 0) {
    return false;
  }
#else  // defined(__APPLE__)
  // No pipe2 here: create the pipe, then set FD_CLOEXEC on both ends.
  if (pipe(fds) != 0) {
    return false;
  }
  if (fcntl(fds[0], F_SETFD, FD_CLOEXEC) != 0 || fcntl(fds[1], F_SETFD, FD_CLOEXEC) != 0) {
    close(fds[0]);
    close(fds[1]);
    return false;
  }
#endif

  *read_fd = fds[0];
  *write_fd = fds[1];
  return true;
}
222 
// Fork one child per free job slot while tests remain. Each child's
// stdout/stderr are redirected into a pipe; the parent keeps the
// (non-blocking) read end and polls it from ReadTestsOutput().
void Isolate::LaunchTests() {
  while (!running_indices_.empty() && cur_test_index_ < tests_.size()) {
    int read_fd, write_fd;
    if (!Pipe(&read_fd, &write_fd)) {
      FATAL_PLOG("Unexpected failure from pipe");
    }
    // Non-blocking reads let the parent service many children at once.
    if (fcntl(read_fd, F_SETFL, O_NONBLOCK) == -1) {
      FATAL_PLOG("Unexpected failure from fcntl");
    }

    pid_t pid = fork();
    if (pid == -1) {
      FATAL_PLOG("Unexpected failure from fork");
    }
    if (pid == 0) {
      // Child: route stdout and stderr into the pipe, restore default
      // signal handling, then run the single test.
      close(read_fd);
      close(STDOUT_FILENO);
      close(STDERR_FILENO);
      if (dup2(write_fd, STDOUT_FILENO) == -1) {
        exit(1);
      }
      if (dup2(write_fd, STDERR_FILENO) == -1) {
        exit(1);
      }
      close(write_fd);
      UnregisterSignalHandler();
      exit(ChildProcessFn(tests_[cur_test_index_]));
    }

    // Parent: claim a run slot and record the test in all bookkeeping maps.
    size_t run_index = running_indices_.back();
    running_indices_.pop_back();
    Test* test = new Test(tests_[cur_test_index_], cur_test_index_, run_index, read_fd);
    running_by_pid_.emplace(pid, test);
    running_[run_index] = test;
    running_by_test_index_[cur_test_index_] = test;

    // Arm the slot's pollfd so ReadTestsOutput picks up child output.
    pollfd* pollfd = &running_pollfds_[run_index];
    pollfd->fd = test->fd();
    pollfd->events = POLLIN;
    cur_test_index_++;
    // Close the parent's write end so EOF is seen when the child exits.
    close(write_fd);
  }
}
266 
ReadTestsOutput()267 void Isolate::ReadTestsOutput() {
268   int ready = poll(running_pollfds_.data(), static_cast<nfds_t>(running_pollfds_.size()), 0);
269   if (ready <= 0) {
270     return;
271   }
272 
273   for (size_t i = 0; i < running_pollfds_.size(); i++) {
274     pollfd* pfd = &running_pollfds_[i];
275     if (pfd->revents & POLLIN) {
276       Test* test = running_[i];
277       if (!test->Read()) {
278         test->CloseFd();
279         pfd->fd = 0;
280         pfd->events = 0;
281       }
282     }
283     pfd->revents = 0;
284   }
285 }
286 
CheckTestsFinished()287 size_t Isolate::CheckTestsFinished() {
288   size_t finished_tests = 0;
289   int status;
290   pid_t pid;
291   while ((pid = static_cast<pid_t>(TEMP_FAILURE_RETRY(waitpid(-1, &status, WNOHANG)))) > 0) {
292     if (pid == -1) {
293       FATAL_PLOG("Unexpected failure from waitpid");
294     }
295     auto entry = running_by_pid_.find(pid);
296     if (entry == running_by_pid_.end()) {
297       FATAL_LOG("Found process not spawned by the isolation framework");
298     }
299 
300     std::unique_ptr<Test>& test_ptr = entry->second;
301     Test* test = test_ptr.get();
302     test->Stop();
303 
304     // Read any leftover data.
305     test->ReadUntilClosed();
306     if (test->result() == TEST_NONE) {
307       if (WIFSIGNALED(status)) {
308         std::string output(test->name() + " terminated by signal: " + strsignal(WTERMSIG(status)) +
309                            ".\n");
310         test->AppendOutput(output);
311         test->set_result(TEST_FAIL);
312       } else {
313         int exit_code = WEXITSTATUS(status);
314         if (exit_code != 0) {
315           std::string output(test->name() + " exited with exitcode " + std::to_string(exit_code) +
316                              ".\n");
317           test->AppendOutput(output);
318           test->set_result(TEST_FAIL);
319         } else {
320           // Set the result based on the output, since skipped tests and
321           // passing tests have the same exit status.
322           test->SetResultFromOutput();
323         }
324       }
325     } else if (test->result() == TEST_TIMEOUT) {
326       uint64_t time_ms = options_.deadline_threshold_ms();
327       std::string timeout_str(test->name() + " killed because of timeout at " +
328                               std::to_string(time_ms) + " ms.\n");
329       test->AppendOutput(timeout_str);
330     }
331 
332     if (test->ExpectFail()) {
333       if (test->result() == TEST_FAIL) {
334         // The test is expected to fail, it failed.
335         test->set_result(TEST_XFAIL);
336       } else if (test->result() == TEST_PASS) {
337         // The test is expected to fail, it passed.
338         test->set_result(TEST_XPASS);
339       }
340     }
341 
342     test->Print();
343 
344     switch (test->result()) {
345       case TEST_PASS:
346         total_pass_tests_++;
347         if (test->slow()) {
348           total_slow_tests_++;
349         }
350         break;
351       case TEST_XPASS:
352         total_xpass_tests_++;
353         break;
354       case TEST_FAIL:
355         total_fail_tests_++;
356         break;
357       case TEST_TIMEOUT:
358         total_timeout_tests_++;
359         break;
360       case TEST_XFAIL:
361         total_xfail_tests_++;
362         break;
363       case TEST_SKIPPED:
364         total_skipped_tests_++;
365         break;
366       case TEST_NONE:
367         FATAL_LOG("Test result is TEST_NONE, this should not be possible");
368     }
369     finished_tests++;
370     size_t test_index = test->test_index();
371     finished_.emplace(test_index, test_ptr.release());
372     running_indices_.push_back(test->run_index());
373 
374     // Remove it from all of the running indices.
375     size_t run_index = test->run_index();
376     if (running_by_pid_.erase(pid) != 1) {
377       printf("Internal error: Erasing pid %d from running_by_pid_ incorrect\n", pid);
378     }
379     if (running_by_test_index_.erase(test_index) == 0) {
380       printf("Internal error: Erasing test_index %zu from running_by_pid_ incorrect\n", test_index);
381     }
382     running_[run_index] = nullptr;
383     running_pollfds_[run_index] = {};
384   }
385 
386   // The only valid error case is if ECHILD is returned because there are
387   // no more processes left running.
388   if (pid == -1 && errno != ECHILD) {
389     FATAL_PLOG("Unexpected failure from waitpid");
390   }
391   return finished_tests;
392 }
393 
// Check every running test against the slow and deadline thresholds.
// A test past its deadline is marked TEST_TIMEOUT and SIGKILLed; the
// killed child is reaped later by CheckTestsFinished().
void Isolate::CheckTestsTimeout() {
  for (auto& entry : running_by_pid_) {
    Test* test = entry.second.get();
    if (test->result() == TEST_TIMEOUT) {
      // Already killed; waiting for it to be reaped.
      continue;
    }

    if (NanoTime() > test->start_ns() + deadline_threshold_ns_) {
      test->set_result(TEST_TIMEOUT);
      // Do not mark this as slow and timed out.
      test->set_slow(false);
      // Test gets cleaned up in CheckTestsFinished.
      kill(entry.first, SIGKILL);
    } else if (!test->slow() && NanoTime() > test->start_ns() + slow_threshold_ns_) {
      // Mark the test as running slow.
      test->set_slow(true);
    }
  }
}
413 
HandleSignals()414 void Isolate::HandleSignals() {
415   int signal = g_signal.exchange(0);
416   if (signal == SIGINT) {
417     printf("Terminating due to signal...\n");
418     for (auto& entry : running_by_pid_) {
419       kill(entry.first, SIGKILL);
420     }
421     exit(1);
422   } else if (signal == SIGQUIT) {
423     printf("List of current running tests:\n");
424     for (const auto& entry : running_by_test_index_) {
425       const Test* test = entry.second;
426       uint64_t run_time_ms = (NanoTime() - test->start_ns()) / kNsPerMs;
427       printf("  %s (elapsed time %" PRId64 " ms)\n", test->name().c_str(), run_time_ms);
428     }
429   }
430 }
431 
RunAllTests()432 void Isolate::RunAllTests() {
433   total_pass_tests_ = 0;
434   total_xpass_tests_ = 0;
435   total_fail_tests_ = 0;
436   total_xfail_tests_ = 0;
437   total_timeout_tests_ = 0;
438   total_slow_tests_ = 0;
439   total_skipped_tests_ = 0;
440 
441   running_by_test_index_.clear();
442 
443   size_t job_count = options_.job_count();
444   running_.clear();
445   running_.resize(job_count);
446   running_pollfds_.resize(job_count);
447   memset(running_pollfds_.data(), 0, running_pollfds_.size() * sizeof(pollfd));
448   running_indices_.clear();
449   for (size_t i = 0; i < job_count; i++) {
450     running_indices_.push_back(i);
451   }
452 
453   finished_.clear();
454 
455   size_t finished = 0;
456   cur_test_index_ = 0;
457   while (finished < tests_.size()) {
458     LaunchTests();
459 
460     ReadTestsOutput();
461 
462     finished += CheckTestsFinished();
463 
464     CheckTestsTimeout();
465 
466     HandleSignals();
467 
468     usleep(MIN_USECONDS_WAIT);
469   }
470 }
471 
PrintResults(size_t total,const ResultsType & results,std::string * footer)472 void Isolate::PrintResults(size_t total, const ResultsType& results, std::string* footer) {
473   ColoredPrintf(results.color, results.prefix);
474   if (results.list_desc != nullptr) {
475     printf(" %s %s, listed below:\n", PluralizeString(total, " test").c_str(), results.list_desc);
476   } else {
477     printf(" %s, listed below:\n", PluralizeString(total, " test").c_str());
478   }
479   for (const auto& entry : finished_) {
480     const Test* test = entry.second.get();
481     if (results.match_func(*test)) {
482       ColoredPrintf(results.color, results.prefix);
483       printf(" %s", test->name().c_str());
484       if (results.print_func != nullptr) {
485         results.print_func(options_, *test);
486       }
487       printf("\n");
488     }
489   }
490 
491   if (results.title == nullptr) {
492     return;
493   }
494 
495   if (total < 10) {
496     *footer += ' ';
497   }
498   *footer +=
499       PluralizeString(total, (std::string(" ") + results.title + " TEST").c_str(), true) + '\n';
500 }
501 
// Table-driven descriptions of each result category consumed by
// PrintResults(): tag color and prefix, an optional header description,
// an optional ALL-CAPS footer title (nullptr = no footer line), a
// predicate selecting matching tests, and an optional per-test extra
// printer.

// Tests that passed but exceeded the slow threshold.
Isolate::ResultsType Isolate::SlowResults = {
    .color = COLOR_YELLOW,
    .prefix = "[  SLOW    ]",
    .list_desc = nullptr,
    .title = "SLOW",
    .match_func = [](const Test& test) { return test.slow(); },
    .print_func =
        [](const Options& options, const Test& test) {
          printf(" (%" PRIu64 " ms, exceeded %" PRIu64 " ms)", test.RunTimeNs() / kNsPerMs,
                 options.slow_threshold_ms());
        },
};

// Tests expected to fail that passed instead.
Isolate::ResultsType Isolate::XpassFailResults = {
    .color = COLOR_RED,
    .prefix = "[  FAILED  ]",
    .list_desc = "should have failed",
    .title = "SHOULD HAVE FAILED",
    .match_func = [](const Test& test) { return test.result() == TEST_XPASS; },
    .print_func = nullptr,
};

// Tests that failed outright.
Isolate::ResultsType Isolate::FailResults = {
    .color = COLOR_RED,
    .prefix = "[  FAILED  ]",
    .list_desc = nullptr,
    .title = "FAILED",
    .match_func = [](const Test& test) { return test.result() == TEST_FAIL; },
    .print_func = nullptr,
};

// Tests killed for exceeding the deadline threshold.
Isolate::ResultsType Isolate::TimeoutResults = {
    .color = COLOR_RED,
    .prefix = "[  TIMEOUT ]",
    .list_desc = nullptr,
    .title = "TIMEOUT",
    .match_func = [](const Test& test) { return test.result() == TEST_TIMEOUT; },
    .print_func =
        [](const Options&, const Test& test) {
          printf(" (stopped at %" PRIu64 " ms)", test.RunTimeNs() / kNsPerMs);
        },
};

// Tests that reported GTEST_SKIP; no footer line for these.
Isolate::ResultsType Isolate::SkippedResults = {
    .color = COLOR_GREEN,
    .prefix = "[  SKIPPED ]",
    .list_desc = nullptr,
    .title = nullptr,
    .match_func = [](const Test& test) { return test.result() == TEST_SKIPPED; },
    .print_func = nullptr,
};
553 
PrintFooter(uint64_t elapsed_time_ns)554 void Isolate::PrintFooter(uint64_t elapsed_time_ns) {
555   ColoredPrintf(COLOR_GREEN, "[==========]");
556   printf(" %s from %s ran. (%" PRId64 " ms total)\n",
557          PluralizeString(total_tests_, " test").c_str(),
558          PluralizeString(total_suites_, " test suite").c_str(), elapsed_time_ns / kNsPerMs);
559 
560   ColoredPrintf(COLOR_GREEN, "[  PASSED  ]");
561   printf(" %s.", PluralizeString(total_pass_tests_ + total_xfail_tests_, " test").c_str());
562   if (total_xfail_tests_ != 0) {
563     printf(" (%s)", PluralizeString(total_xfail_tests_, " expected failure").c_str());
564   }
565   printf("\n");
566 
567   std::string footer;
568 
569   // Tests that were skipped.
570   if (total_skipped_tests_ != 0) {
571     PrintResults(total_skipped_tests_, SkippedResults, &footer);
572   }
573 
574   // Tests that ran slow.
575   if (total_slow_tests_ != 0) {
576     PrintResults(total_slow_tests_, SlowResults, &footer);
577   }
578 
579   // Tests that passed but should have failed.
580   if (total_xpass_tests_ != 0) {
581     PrintResults(total_xpass_tests_, XpassFailResults, &footer);
582   }
583 
584   // Tests that timed out.
585   if (total_timeout_tests_ != 0) {
586     PrintResults(total_timeout_tests_, TimeoutResults, &footer);
587   }
588 
589   // Tests that failed.
590   if (total_fail_tests_ != 0) {
591     PrintResults(total_fail_tests_, FailResults, &footer);
592   }
593 
594   if (!footer.empty()) {
595     printf("\n%s", footer.c_str());
596   }
597 
598   if (total_disable_tests_ != 0) {
599     if (footer.empty()) {
600       printf("\n");
601     }
602     ColoredPrintf(COLOR_YELLOW, "  YOU HAVE %s\n\n",
603                   PluralizeString(total_disable_tests_, " DISABLED TEST", true).c_str());
604   }
605 
606   fflush(stdout);
607 }
608 
// Return a copy of |xml| with the five special XML characters replaced
// by their predefined entities, for safe embedding in attributes and
// element content.
std::string XmlEscape(const std::string& xml) {
  std::string result;
  result.reserve(xml.size());

  for (char c : xml) {
    if (c == '<') {
      result += "&lt;";
    } else if (c == '>') {
      result += "&gt;";
    } else if (c == '&') {
      result += "&amp;";
    } else if (c == '\'') {
      result += "&apos;";
    } else if (c == '"') {
      result += "&quot;";
    } else {
      result += c;
    }
  }

  return result;
}
638 
639 class TestResultPrinter : public ::testing::EmptyTestEventListener {
640  public:
TestResultPrinter()641   TestResultPrinter() : pinfo_(nullptr) {}
OnTestStart(const::testing::TestInfo & test_info)642   virtual void OnTestStart(const ::testing::TestInfo& test_info) {
643     pinfo_ = &test_info;  // Record test_info for use in OnTestPartResult.
644   }
645   virtual void OnTestPartResult(const ::testing::TestPartResult& result);
646 
647  private:
648   const ::testing::TestInfo* pinfo_;
649 };
650 
651 // Called after an assertion failure.
OnTestPartResult(const::testing::TestPartResult & result)652 void TestResultPrinter::OnTestPartResult(const ::testing::TestPartResult& result) {
653   // If the test part succeeded, we don't need to do anything.
654   if (result.type() == ::testing::TestPartResult::kSuccess) {
655     return;
656   }
657 
658   if (result.type() == ::testing::TestPartResult::kSkip) {
659     printf("%s:(%d) Skipped\n", result.file_name(), result.line_number());
660     if (*result.message()) {
661       printf("%s\n", result.message());
662     }
663   } else {
664     // Print failure message from the assertion (e.g. expected this and got that).
665     printf("%s:(%d) Failure in test %s.%s\n%s\n", result.file_name(), result.line_number(),
666            pinfo_->test_suite_name(), pinfo_->name(), result.message());
667   }
668   fflush(stdout);
669 }
670 
671 // Output xml file when --gtest_output is used, write this function as we can't reuse
672 // gtest.cc:XmlUnitTestResultPrinter. The reason is XmlUnitTestResultPrinter is totally
673 // defined in gtest.cc and not expose to outside. What's more, as we don't run gtest in
674 // the parent process, we don't have gtest classes which are needed by XmlUnitTestResultPrinter.
// Output xml file when --gtest_output is used, write this function as we can't reuse
// gtest.cc:XmlUnitTestResultPrinter. The reason is XmlUnitTestResultPrinter is totally
// defined in gtest.cc and not expose to outside. What's more, as we don't run gtest in
// the parent process, we don't have gtest classes which are needed by XmlUnitTestResultPrinter.
//
// Writes a <testsuites>/<testsuite>/<testcase> tree for all finished
// tests, grouped by suite. XFAIL tests are omitted entirely.
// NOTE(review): the time="%.3lf" attributes are computed as ns/kNsPerMs,
// i.e. milliseconds — gtest's own XML uses seconds; confirm consumers
// expect milliseconds here.
void Isolate::WriteXmlResults(uint64_t elapsed_time_ns, time_t start_time) {
  FILE* fp = fopen(options_.xml_file().c_str(), "w");
  if (fp == nullptr) {
    printf("Cannot open xml file '%s': %s\n", options_.xml_file().c_str(), strerror(errno));
    exit(1);
  }

  // Format the run's start time as an ISO-8601-style local timestamp.
  const tm* time_struct = localtime(&start_time);
  if (time_struct == nullptr) {
    FATAL_PLOG("Unexpected failure from localtime");
  }
  char timestamp[40];
  snprintf(timestamp, sizeof(timestamp), "%4d-%02d-%02dT%02d:%02d:%02d",
           time_struct->tm_year + 1900, time_struct->tm_mon + 1, time_struct->tm_mday,
           time_struct->tm_hour, time_struct->tm_min, time_struct->tm_sec);

  fputs("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", fp);
  fprintf(fp, "<testsuites tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"",
          tests_.size(), total_fail_tests_ + total_timeout_tests_ + total_xpass_tests_);
  fprintf(fp, " timestamp=\"%s\" time=\"%.3lf\" name=\"AllTests\">\n", timestamp,
          double(elapsed_time_ns) / kNsPerMs);

  // Construct the suite information.
  struct SuiteInfo {
    std::string suite_name;
    size_t fails = 0;
    double elapsed_ms = 0;
    std::vector<const Test*> tests;
  };
  // finished_ is keyed by test index, so tests of a suite appear
  // contiguously; a change in suite name starts a new SuiteInfo.
  std::string last_suite_name;
  std::vector<SuiteInfo> suites;
  SuiteInfo* info = nullptr;
  for (const auto& entry : finished_) {
    const Test* test = entry.second.get();
    const std::string& suite_name = test->suite_name();
    if (test->result() == TEST_XFAIL) {
      // Skip XFAIL tests.
      continue;
    }
    if (last_suite_name != suite_name) {
      // substr drops the last character of the suite name (presumably
      // the trailing '.' from the gtest listing — TODO confirm).
      SuiteInfo suite_info{.suite_name = suite_name.substr(0, suite_name.size() - 1)};
      last_suite_name = suite_name;
      suites.push_back(suite_info);
      info = &suites.back();
    }
    info->tests.push_back(test);
    info->elapsed_ms += double(test->RunTimeNs()) / kNsPerMs;
    if (test->result() != TEST_PASS) {
      info->fails++;
    }
  }

  // Emit one <testsuite> element per suite with its <testcase> children.
  for (auto& suite_entry : suites) {
    fprintf(fp,
            "  <testsuite name=\"%s\" tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"",
            suite_entry.suite_name.c_str(), suite_entry.tests.size(), suite_entry.fails);
    fprintf(fp, " time=\"%.3lf\">\n", suite_entry.elapsed_ms);

    for (auto test : suite_entry.tests) {
      fprintf(fp, "    <testcase name=\"%s\" status=\"run\" time=\"%.3lf\" classname=\"%s\"",
              test->test_name().c_str(), double(test->RunTimeNs()) / kNsPerMs,
              suite_entry.suite_name.c_str());
      if (test->result() == TEST_PASS) {
        fputs(" />\n", fp);
      } else {
        // Non-passing tests carry their full output in the failure
        // message attribute (XML-escaped).
        fputs(">\n", fp);
        const std::string escaped_output = XmlEscape(test->output());
        fprintf(fp, "      <failure message=\"%s\" type=\"\">\n", escaped_output.c_str());
        fputs("      </failure>\n", fp);
        fputs("    </testcase>\n", fp);
      }
    }
    fputs("  </testsuite>\n", fp);
  }
  fputs("</testsuites>\n", fp);
  fclose(fp);
}
752 
753 // Note: We can't use android::base::HwTimeoutMultiplier due to linking issues.
GetHwTimeoutMultiplier()754 uint64_t GetHwTimeoutMultiplier() {
755 #if defined(__BIONIC__)
756   char multiplier_string[PROP_VALUE_MAX] = {};
757   if (__system_property_get("ro.hw_timeout_multiplier", multiplier_string) != 0) {
758     char* end = nullptr;
759     auto multiplier = strtoull(multiplier_string, &end, 10);
760     if (end != nullptr && *end == '\0' && multiplier != ULLONG_MAX) {
761       return multiplier;
762     }
763   }
764 #endif
765 
766   return 1;
767 }
768 
// Top-level driver: validate sharding options, enumerate the tests,
// install the custom result printer and signal handlers, then run all
// tests for the requested number of iterations (negative = repeat
// forever). Returns 0 if every iteration fully passed, 1 otherwise.
int Isolate::Run() {
  // Thresholds are scaled by the device's hw timeout multiplier.
  uint64_t multiplier = GetHwTimeoutMultiplier();
  slow_threshold_ns_ = options_.slow_threshold_ms() * kNsPerMs * multiplier;
  deadline_threshold_ns_ = options_.deadline_threshold_ms() * kNsPerMs * multiplier;

  bool sharding_enabled = options_.total_shards() > 1;
  if (sharding_enabled &&
      (options_.shard_index() < 0 || options_.shard_index() >= options_.total_shards())) {
    ColoredPrintf(COLOR_RED,
                  "Invalid environment variables: we require 0 <= GTEST_SHARD_INDEX < "
                  "GTEST_TOTAL_SHARDS, but you have GTEST_SHARD_INDEX=%" PRId64
                  ", GTEST_TOTAL_SHARDS=%" PRId64,
                  options_.shard_index(), options_.total_shards());
    printf("\n");
    return 1;
  }

  if (!options_.filter().empty()) {
    ColoredPrintf(COLOR_YELLOW, "Note: Google Test filter = %s", options_.filter().c_str());
    printf("\n");
  }

  if (sharding_enabled) {
    ColoredPrintf(COLOR_YELLOW, "Note: This is test shard %" PRId64 " of %" PRId64,
                  options_.shard_index() + 1, options_.total_shards());
    printf("\n");
  }

  EnumerateTests();

  // Stop default result printer to avoid environment setup/teardown information for each test.
  delete ::testing::UnitTest::GetInstance()->listeners().Release(
      ::testing::UnitTest::GetInstance()->listeners().default_result_printer());
  ::testing::UnitTest::GetInstance()->listeners().Append(new TestResultPrinter);
  RegisterSignalHandler();

  std::string job_info("Running " + PluralizeString(total_tests_, " test") + " from " +
                       PluralizeString(total_suites_, " test suite") + " (" +
                       PluralizeString(options_.job_count(), " job") + ").");

  int exit_code = 0;
  // num_iterations() < 0 means repeat forever.
  for (int i = 0; options_.num_iterations() < 0 || i < options_.num_iterations(); i++) {
    if (i > 0) {
      printf("\nRepeating all tests (iteration %d) . . .\n\n", i + 1);
    }
    ColoredPrintf(COLOR_GREEN, "[==========]");
    printf(" %s\n", job_info.c_str());
    fflush(stdout);

    time_t start_time = time(nullptr);
    uint64_t time_ns = NanoTime();
    RunAllTests();
    time_ns = NanoTime() - time_ns;

    PrintFooter(time_ns);

    if (!options_.xml_file().empty()) {
      WriteXmlResults(time_ns, start_time);
    }

    if (total_pass_tests_ + total_skipped_tests_ + total_xfail_tests_ != tests_.size()) {
      exit_code = 1;
      // NOTE(review): with num_iterations() < 0 (repeat forever) this
      // condition is false, so stop_on_error never breaks the loop —
      // confirm whether "!= 1" was the intent.
      if (options_.stop_on_error() && options_.num_iterations() > 1) {
        printf("\nTerminating repeat run due to failing tests (iteration %d).\n", i + 1);
        break;
      }
    }
  }

  return exit_code;
}
840 
841 }  // namespace gtest_extras
842 }  // namespace android
843