/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>
#include <libgen.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/utsname.h>
#include <time.h>
#include <unistd.h>
#include <chrono>
#include <filesystem>
#include <optional>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
#include <llvm/Support/MemoryBuffer.h>
#pragma clang diagnostic pop

#if defined(__ANDROID__)
#include <android-base/properties.h>
#endif
#include <unwindstack/Error.h>

#include "BranchListFile.h"
#include "CallChainJoiner.h"
#include "ETMRecorder.h"
#include "IOEventLoop.h"
#include "JITDebugReader.h"
#include "MapRecordReader.h"
#include "OfflineUnwinder.h"
#include "ProbeEvents.h"
#include "RecordFilter.h"
#include "cmd_record_impl.h"
#include "command.h"
#include "environment.h"
#include "event_selection_set.h"
#include "event_type.h"
#include "kallsyms.h"
#include "read_apk.h"
#include "read_elf.h"
#include "read_symbol_map.h"
#include "record.h"
#include "record_file.h"
#include "thread_tree.h"
#include "tracing.h"
#include "utils.h"
#include "workload.h"

namespace simpleperf {
namespace {

using android::base::ParseUint;
using android::base::Realpath;

static std::string default_measured_event_type = "cpu-cycles";

static std::unordered_map<std::string, uint64_t> branch_sampling_type_map = {
    {"u", PERF_SAMPLE_BRANCH_USER},
    {"k", PERF_SAMPLE_BRANCH_KERNEL},
    {"any", PERF_SAMPLE_BRANCH_ANY},
    {"any_call", PERF_SAMPLE_BRANCH_ANY_CALL},
    {"any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN},
    {"ind_call", PERF_SAMPLE_BRANCH_IND_CALL},
};

static std::unordered_map<std::string, int> clockid_map = {
    {"realtime", CLOCK_REALTIME},
    {"monotonic", CLOCK_MONOTONIC},
    {"monotonic_raw", CLOCK_MONOTONIC_RAW},
    {"boottime", CLOCK_BOOTTIME},
};
// The max size of records dumped by the kernel is 65535, and the dump stack size
// should be a multiple of 8, so MAX_DUMP_STACK_SIZE is 65528.
static constexpr uint32_t MAX_DUMP_STACK_SIZE = 65528;

// The max allowed pages in mapped buffer is decided by rlimit(RLIMIT_MEMLOCK).
// Here 1024 is a desired value for pages in mapped buffer. If mapped
// successfully, the buffer size = 1024 * 4K (page size) = 4M.
static constexpr size_t DESIRED_PAGES_IN_MAPPED_BUFFER = 1024;

// Cache size used by CallChainJoiner to cache call chains in memory.
static constexpr size_t DEFAULT_CALL_CHAIN_JOINER_CACHE_SIZE = 8 * kMegabyte;

static constexpr size_t kDefaultAuxBufferSize = 4 * kMegabyte;

// On Pixel 3, it takes about 1ms to enable ETM, and 16-40ms to disable ETM and copy 4M ETM data.
// So set the default interval to 100ms.
static constexpr uint32_t kDefaultEtmDataFlushIntervalInMs = 100;

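// Timestamps (in ns, from GetSystemClock()) marking the recording stages; used for the timing
// summary logged at the end of PostProcessRecording().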
struct TimeStat {
  uint64_t prepare_recording_time = 0;
  uint64_t start_recording_time = 0;
  uint64_t stop_recording_time = 0;
  uint64_t finish_recording_time = 0;
  uint64_t post_process_time = 0;
};

std::optional<size_t> GetDefaultRecordBufferSize(bool system_wide_recording) {
  // Currently, the record buffer size in user-space is set to match the kernel buffer size on an
  // 8-core system. For system-wide recording, it is 8K pages * 4K page_size * 8 cores = 256MB.
  // For non system-wide recording, it is 1K pages * 4K page_size * 8 cores = 64MB.
  // But on devices with memory >= 4GB, we increase the buffer size to 256MB. This reduces the
  // chance of cutting samples, which can cause broken callchains.
  static constexpr size_t kLowMemoryRecordBufferSize = 64 * kMegabyte;
  static constexpr size_t kHighMemoryRecordBufferSize = 256 * kMegabyte;
  static constexpr size_t kSystemWideRecordBufferSize = 256 * kMegabyte;
  // Ideally we could use >= 4GB here. But the memory size shown in /proc/meminfo is likely to be
  // 3.x GB on a device with 4GB memory. So we have to use <= 3GB.
  static constexpr uint64_t kLowMemoryLimit = 3 * kGigabyte;

  if (system_wide_recording) {
    return kSystemWideRecordBufferSize;
  }
  auto device_memory = GetMemorySize();
  if (!device_memory.has_value()) {
    return std::nullopt;
  }
  return device_memory.value() <= kLowMemoryLimit ? kLowMemoryRecordBufferSize
                                                  : kHighMemoryRecordBufferSize;
}

class RecordCommand : public Command {
 public:
  RecordCommand()
      : Command(
            "record", "record sampling info in perf.data",
            // clang-format off
156 "Usage: simpleperf record [options] [--] [command [command-args]]\n"
157 " Gather sampling information of running [command]. And -a/-p/-t option\n"
158 " can be used to change target of sampling information.\n"
159 " The default options are: -e cpu-cycles -f 4000 -o perf.data.\n"
160 "Select monitored threads:\n"
161 "-a System-wide collection. Use with --exclude-perf to exclude samples for\n"
162 " simpleperf process.\n"
163 #if defined(__ANDROID__)
164 "--app package_name Profile the process of an Android application.\n"
165 " On non-rooted devices, the app must be debuggable,\n"
166 " because we use run-as to switch to the app's context.\n"
167 #endif
168 "-p pid_or_process_name_regex1,pid_or_process_name_regex2,...\n"
169 " Record events on existing processes. Processes are searched either by pid\n"
170 " or process name regex. Mutually exclusive with -a.\n"
171 "-t tid1,tid2,... Record events on existing threads. Mutually exclusive with -a.\n"
172 "\n"
173 "Select monitored event types:\n"
174 "-e event1[:modifier1],event2[:modifier2],...\n"
175 " Select a list of events to record. An event can be:\n"
176 " 1) an event name listed in `simpleperf list`;\n"
177 " 2) a raw PMU event in rN format. N is a hex number.\n"
178 " For example, r1b selects event number 0x1b.\n"
179 " 3) a kprobe event added by --kprobe option.\n"
180 " Modifiers can be added to define how the event should be\n"
181 " monitored. Possible modifiers are:\n"
182 " u - monitor user space events only\n"
183 " k - monitor kernel space events only\n"
184 "--group event1[:modifier],event2[:modifier2],...\n"
185 " Similar to -e option. But events specified in the same --group\n"
186 " option are monitored as a group, and scheduled in and out at the\n"
187 " same time.\n"
188 "--trace-offcpu Generate samples when threads are scheduled off cpu.\n"
189 " Similar to \"-c 1 -e sched:sched_switch\".\n"
190 "--kprobe kprobe_event1,kprobe_event2,...\n"
191 " Add kprobe events during recording. The kprobe_event format is in\n"
192 " Documentation/trace/kprobetrace.rst in the kernel. Examples:\n"
193 " 'p:myprobe do_sys_openat2 $arg2:string' - add event kprobes:myprobe\n"
194 " 'r:myretprobe do_sys_openat2 $retval:s64' - add event kprobes:myretprobe\n"
195 "--add-counter event1,event2,... Add additional event counts in record samples. For example,\n"
196 " we can use `-e cpu-cycles --add-counter instructions` to\n"
197 " get samples for cpu-cycles event, while having instructions\n"
198 " event count for each sample.\n"
199 "\n"
200 "Select monitoring options:\n"
201 "-f freq Set event sample frequency. It means recording at most [freq]\n"
202 " samples every second. For non-tracepoint events, the default\n"
203 " option is -f 4000. A -f/-c option affects all event types\n"
204 " following it until meeting another -f/-c option. For example,\n"
205 " for \"-f 1000 -e cpu-cycles -c 1 -e sched:sched_switch\", cpu-cycles\n"
206 " has sample freq 1000, sched:sched_switch event has sample period 1.\n"
207 "-c count Set event sample period. It means recording one sample when\n"
208 " [count] events happen. For tracepoint events, the default option\n"
209 " is -c 1.\n"
210 "--call-graph fp | dwarf[,<dump_stack_size>]\n"
211 " Enable call graph recording. Use frame pointer or dwarf debug\n"
212 " frame as the method to parse call graph in stack.\n"
213 " Default is no call graph. Default dump_stack_size with -g is 65528.\n"
214 "-g Same as '--call-graph dwarf'.\n"
215 "--clockid clock_id Generate timestamps of samples using selected clock.\n"
216 " Possible values are: realtime, monotonic,\n"
217 " monotonic_raw, boottime, perf. If supported, default\n"
218 " is monotonic, otherwise is perf.\n"
219 "--cpu cpu_item1,cpu_item2,... Monitor events on selected cpus. cpu_item can be a number like\n"
220 " 1, or a range like 0-3. A --cpu option affects all event types\n"
221 " following it until meeting another --cpu option.\n"
222 "--delay time_in_ms Wait time_in_ms milliseconds before recording samples.\n"
223 "--duration time_in_sec Monitor for time_in_sec seconds instead of running\n"
224 " [command]. Here time_in_sec may be any positive\n"
225 " floating point number.\n"
226 "-j branch_filter1,branch_filter2,...\n"
227 " Enable taken branch stack sampling. Each sample captures a series\n"
228 " of consecutive taken branches.\n"
229 " The following filters are defined:\n"
230 " any: any type of branch\n"
231 " any_call: any function call or system call\n"
232 " any_ret: any function return or system call return\n"
233 " ind_call: any indirect branch\n"
234 " u: only when the branch target is at the user level\n"
235 " k: only when the branch target is in the kernel\n"
236 " This option requires at least one branch type among any, any_call,\n"
237 " any_ret, ind_call.\n"
238 "-b Enable taken branch stack sampling. Same as '-j any'.\n"
239 "-m mmap_pages Set pages used in the kernel to cache sample data for each cpu.\n"
240 " It should be a power of 2. If not set, the max possible value <= 1024\n"
241 " will be used.\n"
242 "--user-buffer-size <buffer_size> Set buffer size in userspace to cache sample data.\n"
243 " By default, it is %s.\n"
244 "--no-inherit Don't record created child threads/processes.\n"
245 "--cpu-percent <percent> Set the max percent of cpu time used for recording.\n"
246 " percent is in range [1-100], default is 25.\n"
247 "\n"
248 "--tp-filter filter_string Set filter_string for the previous tracepoint event.\n"
249 " Format is in Documentation/trace/events.rst in the kernel.\n"
250 " An example: 'prev_comm != \"simpleperf\" && (prev_pid > 1)'.\n"
251 "\n"
252 "Dwarf unwinding options:\n"
253 "--post-unwind=(yes|no) If `--call-graph dwarf` option is used, then the user's\n"
254 " stack will be recorded in perf.data and unwound while\n"
255 " recording by default. Use --post-unwind=yes to switch\n"
256 " to unwind after recording.\n"
257 "--no-unwind If `--call-graph dwarf` option is used, then the user's stack\n"
258 " will be unwound by default. Use this option to disable the\n"
259 " unwinding of the user's stack.\n"
260 "--no-callchain-joiner If `--call-graph dwarf` option is used, then by default\n"
261 " callchain joiner is used to break the 64k stack limit\n"
262 " and build more complete call graphs. However, the built\n"
263 " call graphs may not be correct in all cases.\n"
264 "--callchain-joiner-min-matching-nodes count\n"
265 " When callchain joiner is used, set the matched nodes needed to join\n"
266 " callchains. The count should be >= 1. By default it is 1.\n"
267 "--no-cut-samples Simpleperf uses a record buffer to cache records received from the kernel.\n"
268 " When the available space in the buffer reaches low level, the stack data in\n"
269 " samples is truncated to 1KB. When the available space reaches critical level,\n"
270 " it drops all samples. This option makes simpleperf not truncate stack data\n"
271 " when the available space reaches low level.\n"
272 "--keep-failed-unwinding-result Keep reasons for failed unwinding cases\n"
273 "--keep-failed-unwinding-debug-info Keep debug info for failed unwinding cases\n"
274 "\n"
275 "Sample filter options:\n"
276 "--exclude-perf Exclude samples for simpleperf process.\n"
277 RECORD_FILTER_OPTION_HELP_MSG_FOR_RECORDING
278 "\n"
279 "Recording file options:\n"
280 "--no-dump-kernel-symbols Don't dump kernel symbols in perf.data. By default\n"
281 " kernel symbols will be dumped when needed.\n"
282 "--no-dump-symbols Don't dump symbols in perf.data. By default symbols are\n"
283 " dumped in perf.data, to support reporting in another\n"
284 " environment.\n"
285 "-o record_file_name Set record file name, default is perf.data.\n"
286 "--size-limit SIZE[K|M|G] Stop recording after SIZE bytes of records.\n"
287 " Default is unlimited.\n"
288 "--symfs <dir> Look for files with symbols relative to this directory.\n"
289 " This option is used to provide files with symbol table and\n"
290 " debug information, which are used for unwinding and dumping symbols.\n"
291 "--add-meta-info key=value Add extra meta info, which will be stored in the recording file.\n"
292 "\n"
293 "ETM recording options:\n"
294 "--addr-filter filter_str1,filter_str2,...\n"
295 " Provide address filters for cs-etm instruction tracing.\n"
296 " filter_str accepts below formats:\n"
297 " 'filter <addr-range>' -- trace instructions in a range\n"
298 " 'start <addr>' -- start tracing when ip is <addr>\n"
299 " 'stop <addr>' -- stop tracing when ip is <addr>\n"
300 " <addr-range> accepts below formats:\n"
301 " <file_path> -- code sections in a binary file\n"
302 " <vaddr_start>-<vaddr_end>@<file_path> -- part of a binary file\n"
303 " <kernel_addr_start>-<kernel_addr_end> -- part of kernel space\n"
304 " <addr> accepts below formats:\n"
305 " <vaddr>@<file_path> -- virtual addr in a binary file\n"
306 " <kernel_addr> -- a kernel address\n"
307 " Examples:\n"
308 " 'filter 0x456-0x480@/system/lib/libc.so'\n"
309 " 'start 0x456@/system/lib/libc.so,stop 0x480@/system/lib/libc.so'\n"
310 "--aux-buffer-size <buffer_size> Set aux buffer size, only used in cs-etm event type.\n"
311 " Need to be power of 2 and page size aligned.\n"
312 " Used memory size is (buffer_size * (cpu_count + 1).\n"
313 " Default is 4M.\n"
314 "--decode-etm Convert ETM data into branch lists while recording.\n"
315 "--binary binary_name Used with --decode-etm to only generate data for binaries\n"
316 " matching binary_name regex.\n"
317 "--record-timestamp Generate timestamp packets in ETM stream.\n"
318 "--record-cycles Generate cycle count packets in ETM stream.\n"
319 "--cycle-threshold <threshold> Set cycle count counter threshold for ETM cycle count packets.\n"
320 "--etm-flush-interval <interval> Set the interval between ETM data flushes from the ETR buffer\n"
321 " to the perf event buffer (in milliseconds). Default is 100 ms.\n"
322 "\n"
323 "Other options:\n"
324 "--exit-with-parent Stop recording when the thread starting simpleperf dies.\n"
325 "--use-cmd-exit-code Exit with the same exit code as the monitored cmdline.\n"
326 "--start_profiling_fd fd_no After starting profiling, write \"STARTED\" to\n"
327 " <fd_no>, then close <fd_no>.\n"
328 "--stdio-controls-profiling Use stdin/stdout to pause/resume profiling.\n"
329 #if defined(__ANDROID__)
330 "--in-app We are already running in the app's context.\n"
331 "--tracepoint-events file_name Read tracepoint events from [file_name] instead of tracefs.\n"
332 #endif
333 #if 0
334 // Below options are only used internally and shouldn't be visible to the public.
335 "--out-fd <fd> Write perf.data to a file descriptor.\n"
336 "--stop-signal-fd <fd> Stop recording when fd is readable.\n"
337 #endif
338 // clang-format on
339 ),
340 system_wide_collection_(false),
341 branch_sampling_(0),
342 fp_callchain_sampling_(false),
343 dwarf_callchain_sampling_(false),
344 dump_stack_size_in_dwarf_sampling_(MAX_DUMP_STACK_SIZE),
345 unwind_dwarf_callchain_(true),
346 post_unwind_(false),
347 child_inherit_(true),
348 duration_in_sec_(0),
349 can_dump_kernel_symbols_(true),
350 dump_symbols_(true),
351 event_selection_set_(false),
352 mmap_page_range_(std::make_pair(1, DESIRED_PAGES_IN_MAPPED_BUFFER)),
353 record_filename_("perf.data"),
354 sample_record_count_(0),
355 in_app_context_(false),
356 trace_offcpu_(false),
357 exclude_kernel_callchain_(false),
358 allow_callchain_joiner_(true),
359 callchain_joiner_min_matching_nodes_(1u),
360 last_record_timestamp_(0u),
361 record_filter_(thread_tree_) {
    // If we run `adb shell simpleperf record xxx` and stop profiling with ctrl-c, adb closes the
    // sockets connecting to simpleperf. After that, simpleperf receives SIGPIPE when writing to
    // stdout/stderr, which is a problem when the '--app' option is used. So ignore SIGPIPE to
    // finish properly.
    signal(SIGPIPE, SIG_IGN);
  }

  std::string LongHelpString() const override;
  void Run(const std::vector<std::string>& args, int* exit_code) override;
  bool Run(const std::vector<std::string>& args) override {
    int exit_code;
    Run(args, &exit_code);
    return exit_code == 0;
  }

 private:
  bool ParseOptions(const std::vector<std::string>& args, std::vector<std::string>* non_option_args,
                    ProbeEvents& probe_events);
  bool AdjustPerfEventLimit();
  bool PrepareRecording(Workload* workload);
  bool DoRecording(Workload* workload);
  bool PostProcessRecording(const std::vector<std::string>& args);
  // pre recording functions
  bool TraceOffCpu();
  bool SetEventSelectionFlags();
  bool CreateAndInitRecordFile();
  std::unique_ptr<RecordFileWriter> CreateRecordFile(const std::string& filename,
                                                     const EventAttrIds& attrs);
  bool DumpKernelSymbol();
  bool DumpTracingData();
  bool DumpMaps();
  bool DumpAuxTraceInfo();

  // recording functions
  bool ProcessRecord(Record* record);
  bool ShouldOmitRecord(Record* record);
  bool DumpMapsForRecord(Record* record);
  bool SaveRecordForPostUnwinding(Record* record);
  bool SaveRecordAfterUnwinding(Record* record);
  bool SaveRecordWithoutUnwinding(Record* record);
  bool ProcessJITDebugInfo(std::vector<JITDebugInfo> debug_info, bool sync_kernel_records);
  bool ProcessControlCmd(IOEventLoop* loop);
  void UpdateRecord(Record* record);
  bool UnwindRecord(SampleRecord& r);
  bool KeepFailedUnwindingResult(const SampleRecord& r, const std::vector<uint64_t>& ips,
                                 const std::vector<uint64_t>& sps);

  // post recording functions
  std::unique_ptr<RecordFileReader> MoveRecordFile(const std::string& old_filename);
  bool MergeMapRecords();
  bool PostUnwindRecords();
  bool JoinCallChains();
  bool DumpAdditionalFeatures(const std::vector<std::string>& args);
  bool DumpBuildIdFeature();
  bool DumpFileFeature();
  bool DumpMetaInfoFeature(bool kernel_symbols_available);
  bool DumpDebugUnwindFeature(const std::unordered_set<Dso*>& dso_set);
  void CollectHitFileInfo(const SampleRecord& r, std::unordered_set<Dso*>* dso_set);
  bool DumpETMBranchListFeature();

  bool system_wide_collection_;
  uint64_t branch_sampling_;
  bool fp_callchain_sampling_;
  bool dwarf_callchain_sampling_;
  uint32_t dump_stack_size_in_dwarf_sampling_;
  bool unwind_dwarf_callchain_;
  bool post_unwind_;
  bool keep_failed_unwinding_result_ = false;
  bool keep_failed_unwinding_debug_info_ = false;
  std::unique_ptr<OfflineUnwinder> offline_unwinder_;
  bool child_inherit_;
  uint64_t delay_in_ms_ = 0;
  double duration_in_sec_;
  bool can_dump_kernel_symbols_;
  bool dump_symbols_;
  std::string clockid_;
  EventSelectionSet event_selection_set_;

  std::pair<size_t, size_t> mmap_page_range_;
  std::optional<size_t> user_buffer_size_;
  size_t aux_buffer_size_ = kDefaultAuxBufferSize;

  ThreadTree thread_tree_;
  std::string record_filename_;
  android::base::unique_fd out_fd_;
  std::unique_ptr<RecordFileWriter> record_file_writer_;
  android::base::unique_fd stop_signal_fd_;

  uint64_t sample_record_count_;
  android::base::unique_fd start_profiling_fd_;
  bool stdio_controls_profiling_ = false;

  std::string app_package_name_;
  bool in_app_context_;
  bool trace_offcpu_;
  bool exclude_kernel_callchain_;
  uint64_t size_limit_in_bytes_ = 0;
  uint64_t max_sample_freq_ = DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT;
  size_t cpu_time_max_percent_ = 25;

  // For CallChainJoiner
  bool allow_callchain_joiner_;
  size_t callchain_joiner_min_matching_nodes_;
  std::unique_ptr<CallChainJoiner> callchain_joiner_;
  bool allow_truncating_samples_ = true;

  std::unique_ptr<JITDebugReader> jit_debug_reader_;
  uint64_t last_record_timestamp_;  // used to insert Mmap2Records for JIT debug info
  TimeStat time_stat_;
  EventAttrWithId dumping_attr_id_;
  // In system wide recording, record if we have dumped map info for a process.
  std::unordered_set<pid_t> dumped_processes_;
  bool exclude_perf_ = false;
  RecordFilter record_filter_;

  std::optional<MapRecordReader> map_record_reader_;
  std::optional<MapRecordThread> map_record_thread_;

  std::unordered_map<std::string, std::string> extra_meta_info_;
  bool use_cmd_exit_code_ = false;
  std::vector<std::string> add_counters_;

  std::unique_ptr<ETMBranchListGenerator> etm_branch_list_generator_;
  std::unique_ptr<RegEx> binary_name_regex_;
  std::chrono::milliseconds etm_flush_interval_{kDefaultEtmDataFlushIntervalInMs};
};

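// long_help_string_ contains a "%s" placeholder (in the --user-buffer-size description), which
// is filled in here with the default buffer sizes computed at runtime.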
std::string RecordCommand::LongHelpString() const {
  uint64_t process_buffer_size = 0;
  uint64_t system_wide_buffer_size = 0;
  if (auto size = GetDefaultRecordBufferSize(false); size) {
    process_buffer_size = size.value() / kMegabyte;
  }
  if (auto size = GetDefaultRecordBufferSize(true); size) {
    system_wide_buffer_size = size.value() / kMegabyte;
  }
  std::string buffer_size_str;
  if (process_buffer_size == system_wide_buffer_size) {
    buffer_size_str = android::base::StringPrintf("%" PRIu64 "M", process_buffer_size);
  } else {
    buffer_size_str =
        android::base::StringPrintf("%" PRIu64 "M for process recording and %" PRIu64
                                    "M\n for system wide recording",
                                    process_buffer_size, system_wide_buffer_size);
  }
  return android::base::StringPrintf(long_help_string_.c_str(), buffer_size_str.c_str());
}

void RecordCommand::Run(const std::vector<std::string>& args, int* exit_code) {
  *exit_code = 1;
  time_stat_.prepare_recording_time = GetSystemClock();
  ScopedCurrentArch scoped_arch(GetMachineArch());

  if (!CheckPerfEventLimit()) {
    return;
  }
  AllowMoreOpenedFiles();

  std::vector<std::string> workload_args;
  ProbeEvents probe_events(event_selection_set_);
  if (!ParseOptions(args, &workload_args, probe_events)) {
    return;
  }
  if (!AdjustPerfEventLimit()) {
    return;
  }
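  // Temporary files (e.g. those used during post processing) are created in the directory of the
  // output file, which is assumed to be writable.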
  std::unique_ptr<ScopedTempFiles> scoped_temp_files =
      ScopedTempFiles::Create(android::base::Dirname(record_filename_));
  if (!scoped_temp_files) {
    PLOG(ERROR) << "Can't create output file in directory "
                << android::base::Dirname(record_filename_);
    return;
  }
  if (!app_package_name_.empty() && !in_app_context_) {
    // Some users want to profile non-debuggable apps on rooted devices. Using run-as would make
    // that impossible with --app. So don't switch to the app's context when we are root.
    if (!IsRoot()) {
      // Running simpleperf in the app's context doesn't allow running a child command, so there
      // is no need to consider the exit code of a child command here.
      *exit_code = RunInAppContext(app_package_name_, "record", args, workload_args.size(),
                                   record_filename_, true)
                       ? 0
                       : 1;
      return;
    }
  }
  std::unique_ptr<Workload> workload;
  if (!workload_args.empty()) {
    workload = Workload::CreateWorkload(workload_args);
    if (workload == nullptr) {
      return;
    }
  }
  if (!PrepareRecording(workload.get())) {
    return;
  }
  time_stat_.start_recording_time = GetSystemClock();
  if (!DoRecording(workload.get()) || !PostProcessRecording(args)) {
    return;
  }
  if (use_cmd_exit_code_ && workload) {
    workload->WaitChildProcess(false, exit_code);
  } else {
    *exit_code = 0;
  }
}

bool RecordCommand::PrepareRecording(Workload* workload) {
  // 1. Prepare in other modules.
  PrepareVdsoFile();

  // 2. Add default event type.
  if (event_selection_set_.empty()) {
    std::string event_type = default_measured_event_type;
    if (GetTargetArch() == ARCH_X86_32 || GetTargetArch() == ARCH_X86_64 ||
        GetTargetArch() == ARCH_RISCV64) {
      // Emulators may not support hardware events. So switch to cpu-clock when cpu-cycles isn't
      // available.
      if (!IsHardwareEventSupported()) {
        event_type = "cpu-clock";
        LOG(INFO) << "Hardware events are not available, switch to cpu-clock.";
      }
    }
    if (!event_selection_set_.AddEventType(event_type)) {
      return false;
    }
  }

  // 3. Process options before opening perf event files.
  exclude_kernel_callchain_ = event_selection_set_.ExcludeKernel();
  if (trace_offcpu_ && !TraceOffCpu()) {
    return false;
  }
  if (!add_counters_.empty()) {
    if (child_inherit_) {
      LOG(ERROR) << "--no-inherit is needed when using --add-counter.";
      return false;
    }
    if (!event_selection_set_.AddCounters(add_counters_)) {
      return false;
    }
  }
  if (!SetEventSelectionFlags()) {
    return false;
  }
  if (unwind_dwarf_callchain_) {
    bool collect_stat = keep_failed_unwinding_result_;
    offline_unwinder_ = OfflineUnwinder::Create(collect_stat);
  }
  if (unwind_dwarf_callchain_ && allow_callchain_joiner_) {
    callchain_joiner_.reset(new CallChainJoiner(DEFAULT_CALL_CHAIN_JOINER_CACHE_SIZE,
                                                callchain_joiner_min_matching_nodes_, false));
  }

  // 4. Add monitored targets.
  bool need_to_check_targets = false;
  if (system_wide_collection_) {
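    // In perf_event_open, pid == -1 with a specific cpu monitors all threads on that cpu.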
    event_selection_set_.AddMonitoredThreads({-1});
  } else if (!event_selection_set_.HasMonitoredTarget()) {
    if (workload != nullptr) {
      event_selection_set_.AddMonitoredProcesses({workload->GetPid()});
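      // Start the events disabled and enable them when the child execs the workload, so no
      // samples are collected before the workload starts (assuming the arguments are
      // (enable_on_open, enable_on_exec)).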
      event_selection_set_.SetEnableCondition(false, true);
    } else if (!app_package_name_.empty()) {
      // If the app process hasn't been created yet, wait for it. This allows simpleperf to start
      // before the app process, giving better support for app start-up time profiling.
      std::set<pid_t> pids = WaitForAppProcesses(app_package_name_);
      event_selection_set_.AddMonitoredProcesses(pids);
      need_to_check_targets = true;
    } else {
      LOG(ERROR) << "No threads to monitor. Try `simpleperf help record` for help";
      return false;
    }
  } else {
    need_to_check_targets = true;
  }
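  // With --delay or aux tracing (ETM), start the events disabled; they are enabled later by the
  // delay callback or by EnableETMEvents().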
  if (delay_in_ms_ != 0 || event_selection_set_.HasAuxTrace()) {
    event_selection_set_.SetEnableCondition(false, false);
  }

  // Profiling JITed/interpreted Java code is supported starting from Android P.
  // Also support profiling art interpreter on host.
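  // (A GetAndroidVersion() of 0 is taken to mean we are not running on Android, i.e. the host
  // case mentioned above.)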
  if (GetAndroidVersion() >= kAndroidVersionP || GetAndroidVersion() == 0) {
    // JIT symfiles are stored in temporary files, and are deleted after recording. But if
    // `-g --no-unwind` option is used, we want to keep symfiles to support unwinding in
    // the debug-unwind cmd.
    auto symfile_option = (dwarf_callchain_sampling_ && !unwind_dwarf_callchain_)
                              ? JITDebugReader::SymFileOption::kKeepSymFiles
                              : JITDebugReader::SymFileOption::kDropSymFiles;
    auto sync_option = (clockid_ == "monotonic") ? JITDebugReader::SyncOption::kSyncWithRecords
                                                 : JITDebugReader::SyncOption::kNoSync;
    jit_debug_reader_.reset(new JITDebugReader(record_filename_, symfile_option, sync_option));
    // To profile Java code, we need to dump maps containing vdex files, which are not executable.
    event_selection_set_.SetRecordNotExecutableMaps(true);
  }

  // 5. Open perf event files and create mapped buffers.
  if (!event_selection_set_.OpenEventFiles()) {
    return false;
  }
  size_t record_buffer_size = 0;
  if (user_buffer_size_.has_value()) {
    record_buffer_size = user_buffer_size_.value();
  } else {
    auto default_size = GetDefaultRecordBufferSize(system_wide_collection_);
    if (!default_size.has_value()) {
      return false;
    }
    record_buffer_size = default_size.value();
  }
  if (!event_selection_set_.MmapEventFiles(mmap_page_range_.first, mmap_page_range_.second,
                                           aux_buffer_size_, record_buffer_size,
                                           allow_truncating_samples_, exclude_perf_)) {
    return false;
  }
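  // Every record read from the mapped buffers is passed to ProcessRecord().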
  auto callback = std::bind(&RecordCommand::ProcessRecord, this, std::placeholders::_1);
  if (!event_selection_set_.PrepareToReadMmapEventData(callback)) {
    return false;
  }

  // 6. Create perf.data.
  if (!CreateAndInitRecordFile()) {
    return false;
  }

  // 7. Add read/signal/periodic Events.
  if (need_to_check_targets && !event_selection_set_.StopWhenNoMoreTargets()) {
    return false;
  }
  IOEventLoop* loop = event_selection_set_.GetIOEventLoop();
  auto exit_loop_callback = [loop]() { return loop->ExitLoop(); };
  if (!loop->AddSignalEvents({SIGCHLD, SIGINT, SIGTERM}, exit_loop_callback, IOEventHighPriority)) {
    return false;
  }

  // Only add an event for SIGHUP if we didn't inherit SIG_IGN (e.g. from nohup).
  if (!SignalIsIgnored(SIGHUP)) {
    if (!loop->AddSignalEvent(SIGHUP, exit_loop_callback, IOEventHighPriority)) {
      return false;
    }
  }
  if (stop_signal_fd_ != -1) {
    if (!loop->AddReadEvent(stop_signal_fd_, exit_loop_callback, IOEventHighPriority)) {
      return false;
    }
  }

  if (delay_in_ms_ != 0) {
    auto delay_callback = [this]() {
      if (!event_selection_set_.SetEnableEvents(true)) {
        return false;
      }
      if (!system_wide_collection_) {
        // Dump maps in case there are new maps created while delaying.
        return DumpMaps();
      }
      return true;
    };
    // Divide by 1000.0 to avoid truncating sub-second delays to zero.
    if (!loop->AddOneTimeEvent(SecondToTimeval(delay_in_ms_ / 1000.0), delay_callback)) {
      return false;
    }
  }
  if (duration_in_sec_ != 0) {
    if (!loop->AddPeriodicEvent(
            SecondToTimeval(duration_in_sec_), [loop]() { return loop->ExitLoop(); },
            IOEventHighPriority)) {
      return false;
    }
  }
  if (stdio_controls_profiling_) {
    if (!loop->AddReadEvent(0, [this, loop]() { return ProcessControlCmd(loop); })) {
      return false;
    }
  }
  if (jit_debug_reader_) {
    auto callback = [this](std::vector<JITDebugInfo> debug_info, bool sync_kernel_records) {
      return ProcessJITDebugInfo(std::move(debug_info), sync_kernel_records);
    };
    if (!jit_debug_reader_->RegisterDebugInfoCallback(loop, callback)) {
      return false;
    }
    if (!system_wide_collection_) {
      std::set<pid_t> pids = event_selection_set_.GetMonitoredProcesses();
      for (pid_t tid : event_selection_set_.GetMonitoredThreads()) {
        pid_t pid;
        if (GetProcessForThread(tid, &pid)) {
          pids.insert(pid);
        }
      }
      for (pid_t pid : pids) {
        if (!jit_debug_reader_->MonitorProcess(pid)) {
          return false;
        }
      }
      if (!jit_debug_reader_->ReadAllProcesses()) {
        return false;
      }
    }
  }
  if (event_selection_set_.HasAuxTrace()) {
    // ETM events can only be enabled successfully after MmapEventFiles().
    if (delay_in_ms_ == 0 && !event_selection_set_.IsEnabledOnExec()) {
      if (!event_selection_set_.EnableETMEvents()) {
        return false;
      }
    }
    // ETM data is dumped to the kernel buffer only when there is no thread traced by ETM. That
    // happens either when all monitored threads are scheduled off cpu, or when all ETM perf
    // events are disabled.
    // If ETM data isn't dumped to the kernel buffer in time, the overflowed part is dropped. This
    // results in less data than expected, especially in system wide recording. So add a periodic
    // event to flush ETM data by temporarily disabling all perf events.
    auto etm_flush = [this]() {
      return event_selection_set_.DisableETMEvents() && event_selection_set_.EnableETMEvents();
    };
    if (!loop->AddPeriodicEvent(SecondToTimeval(etm_flush_interval_.count() / 1000.0), etm_flush)) {
      return false;
    }

    if (etm_branch_list_generator_) {
      if (exclude_perf_) {
        etm_branch_list_generator_->SetExcludePid(getpid());
      }
      if (binary_name_regex_) {
        etm_branch_list_generator_->SetBinaryFilter(binary_name_regex_.get());
      }
    }
  }
  return true;
}

bool RecordCommand::DoRecording(Workload* workload) {
  // Write records from the mapped buffers of perf_event_files to the output file while the
  // workload is running.
  if (workload != nullptr && !workload->IsStarted() && !workload->Start()) {
    return false;
  }
  if (start_profiling_fd_.get() != -1) {
    if (!android::base::WriteStringToFd("STARTED", start_profiling_fd_)) {
      PLOG(ERROR) << "failed to write to start_profiling_fd_";
    }
    start_profiling_fd_.reset();
  }
  if (stdio_controls_profiling_) {
    printf("started\n");
    fflush(stdout);
  }
  if (!event_selection_set_.GetIOEventLoop()->RunLoop()) {
    return false;
  }
  time_stat_.stop_recording_time = GetSystemClock();
  if (event_selection_set_.HasAuxTrace()) {
    // Disable ETM events to flush the last ETM data.
    if (!event_selection_set_.DisableETMEvents()) {
      return false;
    }
  }
  if (!event_selection_set_.SyncKernelBuffer()) {
    return false;
  }
  event_selection_set_.CloseEventFiles();
  time_stat_.finish_recording_time = GetSystemClock();
  uint64_t recording_time = time_stat_.finish_recording_time - time_stat_.start_recording_time;
  LOG(INFO) << "Recorded for " << recording_time / 1e9 << " seconds. Start post processing.";
  return true;
}

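// Copies the record file to out_fd (used with the internal --out-fd option), then removes the
// temporary file.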
static bool WriteRecordDataToOutFd(const std::string& in_filename,
                                   android::base::unique_fd out_fd) {
  android::base::unique_fd in_fd(FileHelper::OpenReadOnly(in_filename));
  if (in_fd == -1) {
    PLOG(ERROR) << "Failed to open " << in_filename;
    return false;
  }
  char buf[8192];
  while (true) {
    ssize_t n = TEMP_FAILURE_RETRY(read(in_fd, buf, sizeof(buf)));
    if (n < 0) {
      PLOG(ERROR) << "Failed to read " << in_filename;
      return false;
    }
    if (n == 0) {
      break;
    }
    if (!android::base::WriteFully(out_fd, buf, n)) {
      PLOG(ERROR) << "Failed to write to out_fd";
      return false;
    }
  }
  unlink(in_filename.c_str());
  return true;
}

bool RecordCommand::PostProcessRecording(const std::vector<std::string>& args) {
  // 1. Read records left in the buffer.
  if (!event_selection_set_.FinishReadMmapEventData()) {
    return false;
  }

  // 2. Merge map records dumped while recording by map record thread.
  if (map_record_thread_) {
    if (!map_record_thread_->Join() || !MergeMapRecords()) {
      return false;
    }
  }

  // 3. Post unwind dwarf callchain.
  if (unwind_dwarf_callchain_ && post_unwind_) {
    if (!PostUnwindRecords()) {
      return false;
    }
  }

  // 4. Optionally join Callchains.
  if (callchain_joiner_) {
    JoinCallChains();
  }

  // 5. Dump additional features, and close record file.
  if (!DumpAdditionalFeatures(args)) {
    return false;
  }
  if (!record_file_writer_->Close()) {
    return false;
  }
  if (out_fd_ != -1 && !WriteRecordDataToOutFd(record_filename_, std::move(out_fd_))) {
    return false;
  }
  time_stat_.post_process_time = GetSystemClock();

  // 6. Show brief record result.
  auto record_stat = event_selection_set_.GetRecordStat();
  if (event_selection_set_.HasAuxTrace()) {
    LOG(INFO) << "Aux data traced: " << ReadableCount(record_stat.aux_data_size);
    if (record_stat.lost_aux_data_size != 0) {
      LOG(INFO) << "Aux data lost in user space: " << ReadableCount(record_stat.lost_aux_data_size)
                << ", consider increasing the userspace buffer size (--user-buffer-size).";
    }
  } else {
    // Here we report all lost records as samples. This isn't accurate, because records like
    // MmapRecords are not samples, but it's easier for users to understand.
    size_t userspace_lost_samples =
        record_stat.userspace_lost_samples + record_stat.userspace_lost_non_samples;
    size_t lost_samples = record_stat.kernelspace_lost_records + userspace_lost_samples;

    std::stringstream os;
    os << "Samples recorded: " << ReadableCount(sample_record_count_);
    if (record_stat.userspace_truncated_stack_samples > 0) {
      os << " (" << ReadableCount(record_stat.userspace_truncated_stack_samples)
         << " with truncated stacks)";
    }
    os << ". Samples lost: " << ReadableCount(lost_samples);
    if (lost_samples != 0) {
      os << " (kernelspace: " << ReadableCount(record_stat.kernelspace_lost_records)
         << ", userspace: " << ReadableCount(userspace_lost_samples) << ")";
    }
    os << ".";
    LOG(INFO) << os.str();

    LOG(DEBUG) << "Record stat: kernelspace_lost_records="
               << ReadableCount(record_stat.kernelspace_lost_records)
               << ", userspace_lost_samples=" << ReadableCount(record_stat.userspace_lost_samples)
               << ", userspace_lost_non_samples="
               << ReadableCount(record_stat.userspace_lost_non_samples)
               << ", userspace_truncated_stack_samples="
               << ReadableCount(record_stat.userspace_truncated_stack_samples);

    if (sample_record_count_ + record_stat.kernelspace_lost_records != 0) {
      double kernelspace_lost_percent =
          static_cast<double>(record_stat.kernelspace_lost_records) /
          (record_stat.kernelspace_lost_records + sample_record_count_);
      constexpr double KERNELSPACE_LOST_PERCENT_WARNING_BAR = 0.1;
      if (kernelspace_lost_percent >= KERNELSPACE_LOST_PERCENT_WARNING_BAR) {
        LOG(WARNING) << "Lost " << (kernelspace_lost_percent * 100)
                     << "% of samples in kernel space, "
                     << "consider increasing the kernel buffer size (-m), "
                     << "or decreasing the sample frequency (-f), "
                     << "or increasing the sample period (-c).";
      }
    }
    size_t userspace_lost_truncated_samples =
        userspace_lost_samples + record_stat.userspace_truncated_stack_samples;
    size_t userspace_complete_samples =
        sample_record_count_ - record_stat.userspace_truncated_stack_samples;
    if (userspace_complete_samples + userspace_lost_truncated_samples != 0) {
      double userspace_lost_percent =
          static_cast<double>(userspace_lost_truncated_samples) /
          (userspace_complete_samples + userspace_lost_truncated_samples);
      constexpr double USERSPACE_LOST_PERCENT_WARNING_BAR = 0.1;
      if (userspace_lost_percent >= USERSPACE_LOST_PERCENT_WARNING_BAR) {
        LOG(WARNING) << "Lost/Truncated " << (userspace_lost_percent * 100)
                     << "% of samples in user space, "
                     << "consider increasing the userspace buffer size (--user-buffer-size), "
                     << "or decreasing the sample frequency (-f), "
                     << "or increasing the sample period (-c).";
      }
    }
    if (callchain_joiner_) {
      callchain_joiner_->DumpStat();
    }
  }
  LOG(DEBUG) << "Prepare recording time "
             << (time_stat_.start_recording_time - time_stat_.prepare_recording_time) / 1e9
             << " s, recording time "
             << (time_stat_.stop_recording_time - time_stat_.start_recording_time) / 1e9
             << " s, stop recording time "
             << (time_stat_.finish_recording_time - time_stat_.stop_recording_time) / 1e9
             << " s, post process time "
             << (time_stat_.post_process_time - time_stat_.finish_recording_time) / 1e9 << " s.";
  return true;
}

bool RecordCommand::ParseOptions(const std::vector<std::string>& args,
                                 std::vector<std::string>* non_option_args,
                                 ProbeEvents& probe_events) {
  OptionValueMap options;
  std::vector<std::pair<OptionName, OptionValue>> ordered_options;

  if (!PreprocessOptions(args, GetRecordCmdOptionFormats(), &options, &ordered_options,
                         non_option_args)) {
    return false;
  }

  // Process options.
  system_wide_collection_ = options.PullBoolValue("-a");

  if (auto value = options.PullValue("--add-counter"); value) {
    add_counters_ = android::base::Split(*value->str_value, ",");
  }

  for (const OptionValue& value : options.PullValues("--add-meta-info")) {
    const std::string& s = *value.str_value;
    auto split_pos = s.find('=');
    if (split_pos == std::string::npos || split_pos == 0 || split_pos + 1 == s.size()) {
      LOG(ERROR) << "invalid meta-info: " << s;
      return false;
    }
    extra_meta_info_[s.substr(0, split_pos)] = s.substr(split_pos + 1);
  }

  if (auto value = options.PullValue("--addr-filter"); value) {
    auto filters = ParseAddrFilterOption(*value->str_value);
    if (filters.empty()) {
      return false;
    }
    event_selection_set_.SetAddrFilters(std::move(filters));
  }

  if (auto value = options.PullValue("--app"); value) {
    app_package_name_ = *value->str_value;
  }

  if (auto value = options.PullValue("--aux-buffer-size"); value) {
    uint64_t v = value->uint_value;
    if (v > std::numeric_limits<size_t>::max() || !IsPowerOfTwo(v) || v % sysconf(_SC_PAGE_SIZE)) {
      LOG(ERROR) << "invalid aux buffer size: " << v;
      return false;
    }
    aux_buffer_size_ = static_cast<size_t>(v);
  }

  if (options.PullValue("-b")) {
    branch_sampling_ = branch_sampling_type_map["any"];
  }

  if (auto value = options.PullValue("--binary"); value) {
    binary_name_regex_ = RegEx::Create(*value->str_value);
    if (binary_name_regex_ == nullptr) {
      return false;
    }
  }

  if (!options.PullUintValue("--callchain-joiner-min-matching-nodes",
                             &callchain_joiner_min_matching_nodes_, 1)) {
    return false;
  }

  if (auto value = options.PullValue("--clockid"); value) {
    clockid_ = *value->str_value;
    if (clockid_ != "perf") {
      if (!IsSettingClockIdSupported()) {
        LOG(ERROR) << "Setting clockid is not supported by the kernel.";
        return false;
      }
      if (clockid_map.find(clockid_) == clockid_map.end()) {
        LOG(ERROR) << "Invalid clockid: " << clockid_;
        return false;
      }
    }
  }

  if (!options.PullUintValue("--cpu-percent", &cpu_time_max_percent_, 1, 100)) {
    return false;
  }

  if (options.PullBoolValue("--decode-etm")) {
    etm_branch_list_generator_ = ETMBranchListGenerator::Create(system_wide_collection_);
  }
  uint32_t interval = 0;
  if (options.PullUintValue("--etm-flush-interval", &interval) && interval != 0) {
    etm_flush_interval_ = std::chrono::milliseconds(interval);
  }

  if (options.PullBoolValue("--record-timestamp")) {
    ETMRecorder& recorder = ETMRecorder::GetInstance();
    recorder.SetRecordTimestamp(true);
  }

  if (options.PullBoolValue("--record-cycles")) {
    ETMRecorder& recorder = ETMRecorder::GetInstance();
    recorder.SetRecordCycles(true);
  }

  if (!options.PullUintValue("--delay", &delay_in_ms_)) {
    return false;
  }

  size_t cyc_threshold;
  if (options.PullUintValue("--cycle-threshold", &cyc_threshold)) {
    ETMRecorder& recorder = ETMRecorder::GetInstance();
    recorder.SetCycleThreshold(cyc_threshold);
  }

  if (!options.PullDoubleValue("--duration", &duration_in_sec_, 1e-9)) {
    return false;
  }

  exclude_perf_ = options.PullBoolValue("--exclude-perf");
  if (!record_filter_.ParseOptions(options)) {
    return false;
  }

  if (options.PullValue("--exit-with-parent")) {
    prctl(PR_SET_PDEATHSIG, SIGHUP, 0, 0, 0);
  }

  in_app_context_ = options.PullBoolValue("--in-app");

  for (const OptionValue& value : options.PullValues("-j")) {
    std::vector<std::string> branch_sampling_types = android::base::Split(*value.str_value, ",");
    for (auto& type : branch_sampling_types) {
      auto it = branch_sampling_type_map.find(type);
      if (it == branch_sampling_type_map.end()) {
        LOG(ERROR) << "unrecognized branch sampling filter: " << type;
        return false;
      }
      branch_sampling_ |= it->second;
    }
  }
  keep_failed_unwinding_result_ = options.PullBoolValue("--keep-failed-unwinding-result");
  keep_failed_unwinding_debug_info_ = options.PullBoolValue("--keep-failed-unwinding-debug-info");
  if (keep_failed_unwinding_debug_info_) {
    keep_failed_unwinding_result_ = true;
  }

  for (const OptionValue& value : options.PullValues("--kprobe")) {
    std::vector<std::string> cmds = android::base::Split(*value.str_value, ",");
    for (const auto& cmd : cmds) {
      if (!probe_events.AddKprobe(cmd)) {
        return false;
      }
    }
  }

  if (auto value = options.PullValue("-m"); value) {
    if (!IsPowerOfTwo(value->uint_value) ||
        value->uint_value > std::numeric_limits<size_t>::max()) {
      LOG(ERROR) << "Invalid mmap_pages: '" << value->uint_value << "'";
      return false;
    }
    mmap_page_range_.first = mmap_page_range_.second = value->uint_value;
  }

  allow_callchain_joiner_ = !options.PullBoolValue("--no-callchain-joiner");
  allow_truncating_samples_ = !options.PullBoolValue("--no-cut-samples");
  can_dump_kernel_symbols_ = !options.PullBoolValue("--no-dump-kernel-symbols");
  dump_symbols_ = !options.PullBoolValue("--no-dump-symbols");
  if (auto value = options.PullValue("--no-inherit"); value) {
    child_inherit_ = false;
  } else if (system_wide_collection_) {
    // child_inherit is used to monitor newly created threads. It isn't useful in system wide
    // collection, which monitors all threads running on selected cpus.
    child_inherit_ = false;
  }
  unwind_dwarf_callchain_ = !options.PullBoolValue("--no-unwind");

  if (auto value = options.PullValue("-o"); value) {
    record_filename_ = *value->str_value;
  }

  if (auto value = options.PullValue("--out-fd"); value) {
    out_fd_.reset(static_cast<int>(value->uint_value));
  }

  if (auto strs = options.PullStringValues("-p"); !strs.empty()) {
    if (auto pids = GetPidsFromStrings(strs, true, true); pids) {
      event_selection_set_.AddMonitoredProcesses(pids.value());
    } else {
      return false;
    }
  }

  // Use explicit if statements instead of logical operators to avoid short-circuit evaluation.
  if (options.PullValue("--post-unwind")) {
    post_unwind_ = true;
  }
  if (options.PullValue("--post-unwind=yes")) {
    post_unwind_ = true;
  }
  if (options.PullValue("--post-unwind=no")) {
    post_unwind_ = false;
  }

  if (auto value = options.PullValue("--user-buffer-size"); value) {
    uint64_t v = value->uint_value;
    if (v > std::numeric_limits<size_t>::max() || v == 0) {
      LOG(ERROR) << "invalid user buffer size: " << v;
      return false;
    }
    user_buffer_size_ = static_cast<size_t>(v);
  }

  if (!options.PullUintValue("--size-limit", &size_limit_in_bytes_, 1)) {
    return false;
  }

  if (auto value = options.PullValue("--start_profiling_fd"); value) {
    start_profiling_fd_.reset(static_cast<int>(value->uint_value));
  }

  stdio_controls_profiling_ = options.PullBoolValue("--stdio-controls-profiling");

  if (auto value = options.PullValue("--stop-signal-fd"); value) {
    stop_signal_fd_.reset(static_cast<int>(value->uint_value));
  }

  if (auto value = options.PullValue("--symfs"); value) {
    if (!Dso::SetSymFsDir(*value->str_value)) {
      return false;
    }
  }

  for (const OptionValue& value : options.PullValues("-t")) {
    if (auto tids = GetTidsFromString(*value.str_value, true); tids) {
      event_selection_set_.AddMonitoredThreads(tids.value());
    } else {
      return false;
    }
  }

  trace_offcpu_ = options.PullBoolValue("--trace-offcpu");

  if (auto value = options.PullValue("--tracepoint-events"); value) {
    if (!EventTypeManager::Instance().ReadTracepointsFromFile(*value->str_value)) {
      return false;
    }
  }
  use_cmd_exit_code_ = options.PullBoolValue("--use-cmd-exit-code");

  CHECK(options.values.empty());

  // Process ordered options.
  for (const auto& pair : ordered_options) {
    const OptionName& name = pair.first;
    const OptionValue& value = pair.second;

    if (name == "-c" || name == "-f") {
      if (value.uint_value < 1) {
        LOG(ERROR) << "invalid " << name << ": " << value.uint_value;
        return false;
      }
      SampleRate rate;
      if (name == "-c") {
        rate.sample_period = value.uint_value;
      } else {
        if (value.uint_value >= INT_MAX) {
          LOG(ERROR) << "sample freq can't be bigger than INT_MAX: " << value.uint_value;
          return false;
        }
        rate.sample_freq = value.uint_value;
      }
      event_selection_set_.SetSampleRateForNewEvents(rate);

    } else if (name == "--call-graph") {
      std::vector<std::string> strs = android::base::Split(*value.str_value, ",");
      if (strs[0] == "fp") {
        fp_callchain_sampling_ = true;
        dwarf_callchain_sampling_ = false;
      } else if (strs[0] == "dwarf") {
        fp_callchain_sampling_ = false;
        dwarf_callchain_sampling_ = true;
        if (strs.size() > 1) {
          uint64_t size;
          if (!ParseUint(strs[1], &size)) {
            LOG(ERROR) << "invalid dump stack size in --call-graph option: " << strs[1];
            return false;
          }
          if ((size & 7) != 0) {
            LOG(ERROR) << "dump stack size " << size << " is not 8-byte aligned.";
            return false;
          }
          if (size >= MAX_DUMP_STACK_SIZE) {
            LOG(ERROR) << "dump stack size " << size << " is bigger than max allowed size "
                       << MAX_DUMP_STACK_SIZE << ".";
            return false;
          }
          dump_stack_size_in_dwarf_sampling_ = static_cast<uint32_t>(size);
        }
      }

    } else if (name == "--cpu") {
      if (auto cpus = GetCpusFromString(*value.str_value); cpus) {
        event_selection_set_.SetCpusForNewEvents(
            std::vector<int>(cpus.value().begin(), cpus.value().end()));
      } else {
        return false;
      }
    } else if (name == "-e") {
      std::vector<std::string> event_types = android::base::Split(*value.str_value, ",");
      for (auto& event_type : event_types) {
        if (!probe_events.CreateProbeEventIfNotExist(event_type)) {
          return false;
        }
        if (!event_selection_set_.AddEventType(event_type)) {
          return false;
        }
      }
    } else if (name == "-g") {
      fp_callchain_sampling_ = false;
      dwarf_callchain_sampling_ = true;
    } else if (name == "--group") {
      std::vector<std::string> event_types = android::base::Split(*value.str_value, ",");
      for (const auto& event_type : event_types) {
        if (!probe_events.CreateProbeEventIfNotExist(event_type)) {
          return false;
        }
      }
      if (!event_selection_set_.AddEventGroup(event_types)) {
        return false;
      }
    } else if (name == "--tp-filter") {
      if (!event_selection_set_.SetTracepointFilter(*value.str_value)) {
        return false;
      }
    } else {
      LOG(ERROR) << "unprocessed option: " << name;
      return false;
    }
  }

  if (!dwarf_callchain_sampling_) {
    if (!unwind_dwarf_callchain_) {
      LOG(ERROR) << "--no-unwind is only used with `--call-graph dwarf` option.";
      return false;
    }
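    // No dwarf callchains are being recorded, so there is nothing to unwind.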
    unwind_dwarf_callchain_ = false;
  }
  if (post_unwind_) {
    if (!dwarf_callchain_sampling_ || !unwind_dwarf_callchain_) {
      post_unwind_ = false;
    }
  }

  if (fp_callchain_sampling_) {
    if (GetTargetArch() == ARCH_ARM) {
      LOG(WARNING) << "`--call-graph fp` option doesn't work well on arm architecture, "
                   << "consider using `-g` option or profiling on aarch64 architecture.";
    }
  }

  if (system_wide_collection_ && event_selection_set_.HasMonitoredTarget()) {
    LOG(ERROR) << "Record system wide and existing processes/threads can't be "
                  "used at the same time.";
    return false;
  }

  if (system_wide_collection_ && !IsRoot()) {
    LOG(ERROR) << "System wide profiling needs root privilege.";
    return false;
  }

  if (dump_symbols_ && can_dump_kernel_symbols_) {
    // No need to dump kernel symbols as we will dump all required symbols.
    can_dump_kernel_symbols_ = false;
  }
  if (clockid_.empty()) {
    clockid_ = IsSettingClockIdSupported() ? "monotonic" : "perf";
  }

  return true;
}

bool RecordCommand::AdjustPerfEventLimit() {
  bool set_prop = false;
  // 1. Adjust max_sample_rate.
  uint64_t cur_max_freq;
  if (GetMaxSampleFrequency(&cur_max_freq) && cur_max_freq < max_sample_freq_ &&
      !SetMaxSampleFrequency(max_sample_freq_)) {
    set_prop = true;
  }
  // 2. Adjust perf_cpu_time_max_percent.
  size_t cur_percent;
  if (GetCpuTimeMaxPercent(&cur_percent) && cur_percent != cpu_time_max_percent_ &&
      !SetCpuTimeMaxPercent(cpu_time_max_percent_)) {
    set_prop = true;
  }
  // 3. Adjust perf_event_mlock_kb.
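  // Per cpu, the mapped buffer needs (data pages + 1 metadata page) of locked memory, at 4KB per
  // page as assumed elsewhere in this file; aux tracing additionally locks an aux buffer per cpu.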
  long cpus = sysconf(_SC_NPROCESSORS_CONF);
  uint64_t mlock_kb = cpus * (mmap_page_range_.second + 1) * 4;
  if (event_selection_set_.HasAuxTrace()) {
    mlock_kb += cpus * aux_buffer_size_ / 1024;
  }
  uint64_t cur_mlock_kb;
  if (GetPerfEventMlockKb(&cur_mlock_kb) && cur_mlock_kb < mlock_kb &&
      !SetPerfEventMlockKb(mlock_kb)) {
    set_prop = true;
  }

  if (GetAndroidVersion() >= kAndroidVersionQ && set_prop && !in_app_context_) {
    return SetPerfEventLimits(std::max(max_sample_freq_, cur_max_freq), cpu_time_max_percent_,
                              std::max(mlock_kb, cur_mlock_kb));
  }
  return true;
}

bool RecordCommand::TraceOffCpu() {
  if (FindEventTypeByName("sched:sched_switch") == nullptr) {
    LOG(ERROR) << "Can't trace off cpu because sched:sched_switch event is not available";
    return false;
  }
  for (auto& event_type : event_selection_set_.GetTracepointEvents()) {
    if (event_type->name == "sched:sched_switch") {
      LOG(ERROR) << "Trace offcpu can't be used together with sched:sched_switch event";
      return false;
    }
  }
  if (!IsDumpingRegsForTracepointEventsSupported()) {
    LOG(ERROR) << "Dumping regs for tracepoint events is not supported by the kernel";
    return false;
  }
  // --trace-offcpu option only works with one of the selected event types.
  std::set<std::string> accepted_events = {"cpu-clock", "task-clock"};
  std::vector<const EventType*> events = event_selection_set_.GetEvents();
  if (events.size() != 1 || accepted_events.find(events[0]->name) == accepted_events.end()) {
    LOG(ERROR) << "--trace-offcpu option only works with one of events "
               << android::base::Join(accepted_events, ' ');
    return false;
  }
  if (!event_selection_set_.AddEventType("sched:sched_switch", SampleRate(0, 1))) {
    return false;
  }
  if (IsSwitchRecordSupported()) {
    event_selection_set_.EnableSwitchRecord();
  }
  return true;
}

bool RecordCommand::SetEventSelectionFlags() {
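  // Request sample id fields in all record types (perf_event_attr.sample_id_all), so non-sample
  // records can be ordered and attributed to events.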
1419 event_selection_set_.SampleIdAll();
1420 if (!event_selection_set_.SetBranchSampling(branch_sampling_)) {
1421 return false;
1422 }
1423 if (fp_callchain_sampling_) {
1424 event_selection_set_.EnableFpCallChainSampling();
1425 } else if (dwarf_callchain_sampling_) {
1426 if (!event_selection_set_.EnableDwarfCallChainSampling(dump_stack_size_in_dwarf_sampling_)) {
1427 return false;
1428 }
1429 }
1430 event_selection_set_.SetInherit(child_inherit_);
1431 if (clockid_ != "perf") {
1432 event_selection_set_.SetClockId(clockid_map[clockid_]);
1433 }
1434 return true;
1435 }
1436
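// Create the output file and write its attr section. When dwarf call chains are unwound during
// recording, the regs/stack fields are replaced by a call chain field in the dumped attrs, since
// the raw registers and stacks never reach the file.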
bool RecordCommand::CreateAndInitRecordFile() {
  EventAttrIds attrs = event_selection_set_.GetEventAttrWithId();
  bool remove_regs_and_stacks = unwind_dwarf_callchain_ && !post_unwind_;
  if (remove_regs_and_stacks) {
    for (auto& attr : attrs) {
      ReplaceRegAndStackWithCallChain(attr.attr);
    }
  }
  record_file_writer_ = CreateRecordFile(record_filename_, attrs);
  if (record_file_writer_ == nullptr) {
    return false;
  }
  // Use the first perf_event_attr and the first event id to dump mmap and comm records.
  CHECK(!attrs.empty());
  dumping_attr_id_ = attrs[0];
  CHECK(!dumping_attr_id_.ids.empty());
  map_record_reader_.emplace(dumping_attr_id_.attr, dumping_attr_id_.ids[0],
                             event_selection_set_.RecordNotExecutableMaps());
  map_record_reader_->SetCallback([this](Record* r) { return ProcessRecord(r); });

  return DumpKernelSymbol() && DumpTracingData() && DumpMaps() && DumpAuxTraceInfo();
}

std::unique_ptr<RecordFileWriter> RecordCommand::CreateRecordFile(const std::string& filename,
                                                                  const EventAttrIds& attrs) {
  std::unique_ptr<RecordFileWriter> writer = RecordFileWriter::CreateInstance(filename);
  if (writer != nullptr && writer->WriteAttrSection(attrs)) {
    return writer;
  }
  return nullptr;
}

bool RecordCommand::DumpKernelSymbol() {
  if (can_dump_kernel_symbols_) {
    if (event_selection_set_.NeedKernelSymbol()) {
      std::string kallsyms;
      if (!LoadKernelSymbols(&kallsyms)) {
        // Symbol loading may have failed due to the lack of permissions. This
        // is not fatal; the symbols will appear as "unknown".
        return true;
      }
      KernelSymbolRecord r(kallsyms);
      if (!ProcessRecord(&r)) {
        return false;
      }
    }
  }
  return true;
}

bool RecordCommand::DumpTracingData() {
  std::vector<const EventType*> tracepoint_event_types = event_selection_set_.GetTracepointEvents();
  if (tracepoint_event_types.empty() || !CanRecordRawData() || in_app_context_) {
    return true;  // No need to dump tracing data, or can't do it.
  }
  std::vector<char> tracing_data;
  if (!GetTracingData(tracepoint_event_types, &tracing_data)) {
    return false;
  }
  TracingDataRecord record(tracing_data);
  if (!ProcessRecord(&record)) {
    return false;
  }
  return true;
}

bool RecordCommand::DumpMaps() {
  if (system_wide_collection_) {
    // For system wide recording:
    // If not aux tracing, only dump kernel maps. Maps of a process are dumped when needed (the
    // first time a sample hits that process).
    // If aux tracing with decoding etm data, the maps are dumped by etm_branch_list_generator.
    // If aux tracing without decoding etm data, we don't know which maps will be needed, so dump
    // all process maps. To reduce pre-recording time, we dump process maps in the map record
    // thread while recording.
    if (event_selection_set_.HasAuxTrace() && !etm_branch_list_generator_) {
      map_record_thread_.emplace(*map_record_reader_);
      return true;
    }
    if (!event_selection_set_.ExcludeKernel()) {
      return map_record_reader_->ReadKernelMaps();
    }
    return true;
  }
  if (!event_selection_set_.ExcludeKernel() && !map_record_reader_->ReadKernelMaps()) {
    return false;
  }
  // Map from process id to a set of thread ids in that process.
  std::unordered_map<pid_t, std::unordered_set<pid_t>> process_map;
  for (pid_t pid : event_selection_set_.GetMonitoredProcesses()) {
    std::vector<pid_t> tids = GetThreadsInProcess(pid);
    process_map[pid].insert(tids.begin(), tids.end());
  }
  for (pid_t tid : event_selection_set_.GetMonitoredThreads()) {
    pid_t pid;
    if (GetProcessForThread(tid, &pid)) {
      process_map[pid].insert(tid);
    }
  }

  // Dump each process.
  for (const auto& [pid, tids] : process_map) {
    if (!map_record_reader_->ReadProcessMaps(pid, tids, 0)) {
      return false;
    }
  }
  return true;
}

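// The central sink for all records, whether read from the kernel buffers or synthesized. A record
// is first updated (embedded APK paths, comm names), then possibly dropped (output size limit,
// in-memory-only maps, record filter) or consumed by the ETM branch list generator, and finally
// saved with dwarf unwinding done now, deferred (post unwind), or not needed.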
bool RecordCommand::ProcessRecord(Record* record) {
  UpdateRecord(record);
  if (ShouldOmitRecord(record)) {
    return true;
  }
  if (size_limit_in_bytes_ > 0u) {
    if (size_limit_in_bytes_ < record_file_writer_->GetDataSectionSize()) {
      return event_selection_set_.GetIOEventLoop()->ExitLoop();
    }
  }
  if (jit_debug_reader_ && !jit_debug_reader_->UpdateRecord(record)) {
    return false;
  }
  last_record_timestamp_ = std::max(last_record_timestamp_, record->Timestamp());
  // In system wide recording, maps are dumped when they are needed by records.
  if (system_wide_collection_ && !DumpMapsForRecord(record)) {
    return false;
  }
  // The record filter check should go after DumpMapsForRecord(). Otherwise, process/thread name
  // filters don't work in system wide collection.
  if (record->type() == PERF_RECORD_SAMPLE) {
    if (!record_filter_.Check(static_cast<SampleRecord&>(*record))) {
      return true;
    }
  }
  if (etm_branch_list_generator_) {
    bool consumed = false;
    if (!etm_branch_list_generator_->ProcessRecord(*record, consumed)) {
      return false;
    }
    if (consumed) {
      return true;
    }
  }
  if (unwind_dwarf_callchain_) {
    if (post_unwind_) {
      return SaveRecordForPostUnwinding(record);
    }
    return SaveRecordAfterUnwinding(record);
  }
  return SaveRecordWithoutUnwinding(record);
}

bool RecordCommand::DumpAuxTraceInfo() {
  if (event_selection_set_.HasAuxTrace()) {
    AuxTraceInfoRecord auxtrace_info = ETMRecorder::GetInstance().CreateAuxTraceInfoRecord();
    return ProcessRecord(&auxtrace_info);
  }
  return true;
}

template <typename MmapRecordType>
bool MapOnlyExistInMemory(MmapRecordType* record) {
  return !record->InKernel() && MappedFileOnlyExistInMemory(record->filename);
}

bool RecordCommand::ShouldOmitRecord(Record* record) {
  if (jit_debug_reader_) {
    // To profile jitted Java code, we need PROT_JIT_SYMFILE_MAP maps not overlapped by maps for
    // [anon:dalvik-jit-code-cache]. To profile interpreted Java code, we record maps that
    // are not executable. Some non-exec maps (like those for stack, heap) provide misleading map
    // entries for unwinding, as in http://b/77236599. So it is better to remove
    // dalvik-jit-code-cache and other maps that only exist in memory.
    switch (record->type()) {
      case PERF_RECORD_MMAP:
        return MapOnlyExistInMemory(static_cast<MmapRecord*>(record));
      case PERF_RECORD_MMAP2:
        return MapOnlyExistInMemory(static_cast<Mmap2Record*>(record));
    }
  }
  return false;
}

bool RecordCommand::DumpMapsForRecord(Record* record) {
  if (record->type() == PERF_RECORD_SAMPLE) {
    pid_t pid = static_cast<SampleRecord*>(record)->tid_data.pid;
    if (dumped_processes_.find(pid) == dumped_processes_.end()) {
      // Dump map info and all thread names for that process.
      if (!map_record_reader_->ReadProcessMaps(pid, last_record_timestamp_)) {
        return false;
      }
      dumped_processes_.insert(pid);
    }
  }
  return true;
}

bool RecordCommand::SaveRecordForPostUnwinding(Record* record) {
  if (!record_file_writer_->WriteRecord(*record)) {
    LOG(ERROR) << "If there isn't enough space for storing profiling data, consider using "
               << "--no-post-unwind option.";
    return false;
  }
  return true;
}

bool RecordCommand::SaveRecordAfterUnwinding(Record* record) {
  if (record->type() == PERF_RECORD_SAMPLE) {
    auto& r = *static_cast<SampleRecord*>(record);
    // AdjustCallChainGeneratedByKernel() should go before UnwindRecord(), because we don't want
    // to adjust callchains generated by the dwarf unwinder.
    r.AdjustCallChainGeneratedByKernel();
    if (!UnwindRecord(r)) {
      return false;
    }
    // ExcludeKernelCallChain() should go after UnwindRecord() to notice the generated user call
    // chain.
    if (r.InKernel() && exclude_kernel_callchain_ && !r.ExcludeKernelCallChain()) {
      // If the current record contains no user callchain, skip it.
      return true;
    }
    sample_record_count_++;
  } else {
    thread_tree_.Update(*record);
  }
  return record_file_writer_->WriteRecord(*record);
}

bool RecordCommand::SaveRecordWithoutUnwinding(Record* record) {
  if (record->type() == PERF_RECORD_SAMPLE) {
    auto& r = *static_cast<SampleRecord*>(record);
    if (fp_callchain_sampling_ || dwarf_callchain_sampling_) {
      r.AdjustCallChainGeneratedByKernel();
    }
    if (r.InKernel() && exclude_kernel_callchain_ && !r.ExcludeKernelCallChain()) {
      // If the current record contains no user callchain, skip it.
      return true;
    }
    sample_record_count_++;
  }
  return record_file_writer_->WriteRecord(*record);
}

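// Turn JIT debug info reported by JITDebugReader into file content: JIT code gets a synthesized
// Mmap2Record pointing at the dumped symfile, while dex files get their symbols and dex file
// offsets registered in the thread tree, plus an Mmap2Record when the dex file map is known.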
bool RecordCommand::ProcessJITDebugInfo(std::vector<JITDebugInfo> debug_info,
                                        bool sync_kernel_records) {
  for (auto& info : debug_info) {
    if (info.type == JITDebugInfo::JIT_DEBUG_JIT_CODE) {
      uint64_t timestamp =
          jit_debug_reader_->SyncWithRecords() ? info.timestamp : last_record_timestamp_;
      Mmap2Record record(dumping_attr_id_.attr, false, info.pid, info.pid, info.jit_code_addr,
                         info.jit_code_len, info.file_offset, map_flags::PROT_JIT_SYMFILE_MAP,
                         info.file_path, dumping_attr_id_.ids[0], timestamp);
      if (!ProcessRecord(&record)) {
        return false;
      }
    } else {
      if (!info.symbols.empty()) {
        Dso* dso = thread_tree_.FindUserDsoOrNew(info.file_path, 0, DSO_DEX_FILE);
        dso->SetSymbols(&info.symbols);
      }
      if (info.dex_file_map) {
        ThreadMmap& map = *info.dex_file_map;
        uint64_t timestamp =
            jit_debug_reader_->SyncWithRecords() ? info.timestamp : last_record_timestamp_;
        Mmap2Record record(dumping_attr_id_.attr, false, info.pid, info.pid, map.start_addr,
                           map.len, map.pgoff, map.prot, map.name, dumping_attr_id_.ids[0],
                           timestamp);
        if (!ProcessRecord(&record)) {
          return false;
        }
      }
      thread_tree_.AddDexFileOffset(info.file_path, info.dex_file_offset);
    }
  }
  // We want to let samples see the most recent JIT maps generated before them, but no JIT maps
  // generated after them. So we process existing samples each time new JIT maps are generated,
  // and we prefer to process samples after processing JIT maps, because some of the samples may
  // hit the new JIT maps, and we want to report them properly.
  if (sync_kernel_records && !event_selection_set_.SyncKernelBuffer()) {
    return false;
  }
  return true;
}

bool RecordCommand::ProcessControlCmd(IOEventLoop* loop) {
  char* line = nullptr;
  size_t line_length = 0;
  if (getline(&line, &line_length, stdin) == -1) {
    free(line);
    // When the simpleperf Java API destroys the simpleperf process, it also closes the stdin pipe.
    // So we may see EOF of stdin.
    return loop->ExitLoop();
  }
  std::string cmd = android::base::Trim(line);
  free(line);
  LOG(DEBUG) << "process control cmd: " << cmd;
  bool result = false;
  if (cmd == "pause") {
    result = event_selection_set_.SetEnableEvents(false);
  } else if (cmd == "resume") {
    result = event_selection_set_.SetEnableEvents(true);
  } else {
    LOG(ERROR) << "unknown control cmd: " << cmd;
  }
  printf("%s\n", result ? "ok" : "error");
  fflush(stdout);
  return result;
}

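// Rewrite the filename (and possibly the page offset) of mmap records that point into an APK or
// at an in-memory file, so reports can name the real binary. A hypothetical example of the APK
// rewrite below: a map ["/system/app/Foo.apk" offset=0x3000] hitting an embedded libfoo.so that
// starts at APK offset 0x1000 becomes ["/system/app/Foo.apk!/lib/arm64/libfoo.so" offset=0x2000].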
template <class RecordType>
void UpdateMmapRecordForEmbeddedPath(RecordType& r, bool has_prot, uint32_t prot) {
  if (r.InKernel()) {
    return;
  }
  std::string filename = r.filename;
  bool name_changed = false;
  // Some vdex files in map files are marked with the deleted flag, but they exist in the file
  // system. It may be because a new file is used to replace the old one, but it is still worth
  // trying.
  if (android::base::EndsWith(filename, " (deleted)")) {
    filename.resize(filename.size() - 10);
    name_changed = true;
  }
  if (r.data->pgoff != 0 && (!has_prot || (prot & PROT_EXEC))) {
    // For the case of a shared library "foobar.so" embedded
    // inside an APK, we rewrite the original MMAP from
    // ["path.apk" offset=X] to ["path.apk!/foobar.so" offset=W]
    // so as to make the library name explicit. This update is
    // done here (as part of the record operation) as opposed to
    // on the host during the report, since we want to report
    // the correct library name even if the APK in question
    // is not present on the host. The new offset W is
    // calculated to be with respect to the start of foobar.so,
    // not to the start of path.apk.
    EmbeddedElf* ee = ApkInspector::FindElfInApkByOffset(filename, r.data->pgoff);
    if (ee != nullptr) {
      // Compute the new offset relative to the start of the elf in the APK.
      auto data = *r.data;
      data.pgoff -= ee->entry_offset();
      r.SetDataAndFilename(data, GetUrlInApk(filename, ee->entry_name()));
      return;
    }
  }
  std::string zip_path;
  std::string entry_name;
  if (ParseExtractedInMemoryPath(filename, &zip_path, &entry_name)) {
    filename = GetUrlInApk(zip_path, entry_name);
    name_changed = true;
  }
  if (name_changed) {
    auto data = *r.data;
    r.SetDataAndFilename(data, filename);
  }
}

void RecordCommand::UpdateRecord(Record* record) {
  if (record->type() == PERF_RECORD_MMAP) {
    UpdateMmapRecordForEmbeddedPath(*static_cast<MmapRecord*>(record), false, 0);
  } else if (record->type() == PERF_RECORD_MMAP2) {
    auto r = static_cast<Mmap2Record*>(record);
    UpdateMmapRecordForEmbeddedPath(*r, true, r->data->prot);
  } else if (record->type() == PERF_RECORD_COMM) {
    auto r = static_cast<CommRecord*>(record);
    if (r->data->pid == r->data->tid) {
      std::string s = GetCompleteProcessName(r->data->pid);
      if (!s.empty()) {
        r->SetCommandName(s);
      }
    }
  }
}

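// Unwind the user-space call chain of a sample from the registers and stack data dumped by the
// kernel, then replace those bulky fields with the resulting call chain before the sample is
// written out. Failed unwinding results can optionally be kept for debugging via
// KeepFailedUnwindingResult().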
bool RecordCommand::UnwindRecord(SampleRecord& r) {
  // Skip samples that don't carry all the fields needed for offline unwinding.
  if (!((r.sample_type & PERF_SAMPLE_CALLCHAIN) && (r.sample_type & PERF_SAMPLE_REGS_USER) &&
        (r.regs_user_data.reg_mask != 0) && (r.sample_type & PERF_SAMPLE_STACK_USER))) {
    return true;
  }
  if (r.GetValidStackSize() > 0) {
    ThreadEntry* thread = thread_tree_.FindThreadOrNew(r.tid_data.pid, r.tid_data.tid);
    RegSet regs(r.regs_user_data.abi, r.regs_user_data.reg_mask, r.regs_user_data.regs);
    std::vector<uint64_t> ips;
    std::vector<uint64_t> sps;
    if (!offline_unwinder_->UnwindCallChain(*thread, regs, r.stack_user_data.data,
                                            r.GetValidStackSize(), &ips, &sps)) {
      return false;
    }
    // The unwinding may fail if the JIT debug info isn't the latest. In this case, read JIT debug
    // info from the process and retry unwinding.
    if (jit_debug_reader_ && !post_unwind_ &&
        offline_unwinder_->IsCallChainBrokenForIncompleteJITDebugInfo()) {
      jit_debug_reader_->ReadProcess(r.tid_data.pid);
      jit_debug_reader_->FlushDebugInfo(r.Timestamp());
      if (!offline_unwinder_->UnwindCallChain(*thread, regs, r.stack_user_data.data,
                                              r.GetValidStackSize(), &ips, &sps)) {
        return false;
      }
    }
    if (keep_failed_unwinding_result_ && !KeepFailedUnwindingResult(r, ips, sps)) {
      return false;
    }
    r.ReplaceRegAndStackWithCallChain(ips);
    if (callchain_joiner_ &&
        !callchain_joiner_->AddCallChain(r.tid_data.pid, r.tid_data.tid,
                                         CallChainJoiner::ORIGINAL_OFFLINE, ips, sps)) {
      return false;
    }
  } else {
    // For kernel samples, we still need to remove the user stack and register fields.
    r.ReplaceRegAndStackWithCallChain({});
  }
  return true;
}

bool RecordCommand::KeepFailedUnwindingResult(const SampleRecord& r,
                                              const std::vector<uint64_t>& ips,
                                              const std::vector<uint64_t>& sps) {
  auto& result = offline_unwinder_->GetUnwindingResult();
  if (result.error_code != unwindstack::ERROR_NONE) {
    if (keep_failed_unwinding_debug_info_) {
      return record_file_writer_->WriteRecord(UnwindingResultRecord(
          r.time_data.time, result, r.regs_user_data, r.stack_user_data, ips, sps));
    }
    return record_file_writer_->WriteRecord(
        UnwindingResultRecord(r.time_data.time, result, {}, {}, {}, {}));
  }
  return true;
}

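// Close and move the current output file to old_filename, reopen it for reading, and start a
// fresh writer on record_filename_ with the same attr section. This is the shared first step of
// the rewriting passes below (MergeMapRecords, PostUnwindRecords, JoinCallChains), each of which
// streams the old data section into the new file.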
std::unique_ptr<RecordFileReader> RecordCommand::MoveRecordFile(const std::string& old_filename) {
  if (!record_file_writer_->Close()) {
    return nullptr;
  }
  record_file_writer_.reset();
  std::error_code ec;
  std::filesystem::rename(record_filename_, old_filename, ec);
  if (ec) {
    LOG(DEBUG) << "Failed to rename: " << ec.message();
    // rename() fails on the Android N x86 emulator, which uses kernel 3.10, because rename() in
    // bionic uses the renameat2 syscall, which isn't supported on kernels < 3.15. So add a
    // fallback to the mv command. The mv command can also handle other situations where rename()
    // doesn't work, so we'd like to keep it as a fallback to rename().
    if (!Workload::RunCmd({"mv", record_filename_, old_filename})) {
      return nullptr;
    }
  }

  auto reader = RecordFileReader::CreateInstance(old_filename);
  if (!reader) {
    return nullptr;
  }

  record_file_writer_ = CreateRecordFile(record_filename_, reader->AttrSection());
  if (!record_file_writer_) {
    return nullptr;
  }
  return reader;
}

bool RecordCommand::MergeMapRecords() {
  // 1. Move records from record_filename_ to a temporary file.
  auto tmp_file = ScopedTempFiles::CreateTempFile();
  auto reader = MoveRecordFile(tmp_file->path);
  if (!reader) {
    return false;
  }

  // 2. Copy map records from the map record thread.
  auto callback = [this](Record* r) {
    UpdateRecord(r);
    if (ShouldOmitRecord(r)) {
      return true;
    }
    return record_file_writer_->WriteRecord(*r);
  };
  if (!map_record_thread_->ReadMapRecords(callback)) {
    return false;
  }

  // 3. Copy the data section from the old recording file.
  std::vector<char> buf(64 * 1024);
  uint64_t offset = reader->FileHeader().data.offset;
  uint64_t left_size = reader->FileHeader().data.size;
  while (left_size > 0) {
    size_t nread = std::min<size_t>(left_size, buf.size());
    if (!reader->ReadAtOffset(offset, buf.data(), nread) ||
        !record_file_writer_->WriteData(buf.data(), nread)) {
      return false;
    }
    offset += nread;
    left_size -= nread;
  }
  return true;
}

bool RecordCommand::PostUnwindRecords() {
  auto tmp_file = ScopedTempFiles::CreateTempFile();
  auto reader = MoveRecordFile(tmp_file->path);
  if (!reader) {
    return false;
  }
  // Write new event attrs without regs and stacks fields.
  EventAttrIds attrs = reader->AttrSection();
  for (auto& attr : attrs) {
    ReplaceRegAndStackWithCallChain(attr.attr);
  }
  if (!record_file_writer_->WriteAttrSection(attrs)) {
    return false;
  }

  sample_record_count_ = 0;
  auto callback = [this](std::unique_ptr<Record> record) {
    return SaveRecordAfterUnwinding(record.get());
  };
  return reader->ReadDataSection(callback);
}

bool RecordCommand::JoinCallChains() {
  // 1. Prepare joined callchains.
  if (!callchain_joiner_->JoinCallChains()) {
    return false;
  }
  // 2. Move records from record_filename_ to a temporary file.
  auto tmp_file = ScopedTempFiles::CreateTempFile();
  auto reader = MoveRecordFile(tmp_file->path);
  if (!reader) {
    return false;
  }

  // 3. Read records from the temporary file, and write records with joined call chains back
  // to record_filename_.
  auto record_callback = [&](std::unique_ptr<Record> r) {
    if (r->type() != PERF_RECORD_SAMPLE) {
      return record_file_writer_->WriteRecord(*r);
    }
    SampleRecord& sr = *static_cast<SampleRecord*>(r.get());
    if (!sr.HasUserCallChain()) {
      return record_file_writer_->WriteRecord(sr);
    }
    pid_t pid;
    pid_t tid;
    CallChainJoiner::ChainType type;
    std::vector<uint64_t> ips;
    std::vector<uint64_t> sps;
    if (!callchain_joiner_->GetNextCallChain(pid, tid, type, ips, sps)) {
      return false;
    }
    CHECK_EQ(type, CallChainJoiner::JOINED_OFFLINE);
    CHECK_EQ(pid, static_cast<pid_t>(sr.tid_data.pid));
    CHECK_EQ(tid, static_cast<pid_t>(sr.tid_data.tid));
    sr.UpdateUserCallChain(ips);
    return record_file_writer_->WriteRecord(sr);
  };
  return reader->ReadDataSection(record_callback);
}

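// Load symbols for dynamically generated code from a per-process symbol map file. The file is
// expected to follow the perf map convention: one "<start-addr> <size> <name>" line per symbol,
// with hex addresses, e.g. (hypothetical): "7f80001000 180 my_generated_function".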
static void LoadSymbolMapFile(int pid, const std::string& package, ThreadTree* thread_tree) {
  // On Linux, symbol map files usually go to /tmp/perf-<pid>.map.
  // On Android, there is no directory where any process can create files.
  // For now, use /data/local/tmp/perf-<pid>.map, which works for standalone programs,
  // and /data/data/<package>/perf-<pid>.map, which works for apps.
  auto path = package.empty()
                  ? android::base::StringPrintf("/data/local/tmp/perf-%d.map", pid)
                  : android::base::StringPrintf("/data/data/%s/perf-%d.map", package.c_str(), pid);

  auto symbols = ReadSymbolMapFromFile(path);
  if (!symbols.empty()) {
    thread_tree->AddSymbolsForProcess(pid, &symbols);
  }
}

bool RecordCommand::DumpAdditionalFeatures(const std::vector<std::string>& args) {
  // Read the data section of perf.data to collect hit file information.
  thread_tree_.ClearThreadAndMap();
  bool kernel_symbols_available = false;
  std::string kallsyms;
  if (event_selection_set_.NeedKernelSymbol() && LoadKernelSymbols(&kallsyms)) {
    Dso::SetKallsyms(kallsyms);
    kernel_symbols_available = true;
  }
  std::unordered_set<int> loaded_symbol_maps;
  std::vector<uint64_t> auxtrace_offset;
  std::unordered_set<Dso*> debug_unwinding_files;
  bool failed_unwinding_sample = false;

  auto callback = [&](const Record* r) {
    thread_tree_.Update(*r);
    if (r->type() == PERF_RECORD_SAMPLE) {
      auto sample = reinterpret_cast<const SampleRecord*>(r);
      // Symbol map files are available after recording. Load one for the process.
      if (loaded_symbol_maps.insert(sample->tid_data.pid).second) {
        LoadSymbolMapFile(sample->tid_data.pid, app_package_name_, &thread_tree_);
      }
      if (failed_unwinding_sample) {
        failed_unwinding_sample = false;
        CollectHitFileInfo(*sample, &debug_unwinding_files);
      } else {
        CollectHitFileInfo(*sample, nullptr);
      }
    } else if (r->type() == PERF_RECORD_AUXTRACE) {
      auto auxtrace = static_cast<const AuxTraceRecord*>(r);
      auxtrace_offset.emplace_back(auxtrace->location.file_offset - auxtrace->size());
    } else if (r->type() == SIMPLE_PERF_RECORD_UNWINDING_RESULT) {
      failed_unwinding_sample = true;
    }
  };

  if (!record_file_writer_->ReadDataSection(callback)) {
    return false;
  }

  // Features always written: build id, file, os release, arch, cmdline, and meta info.
  size_t feature_count = 6;
  if (branch_sampling_) {
    feature_count++;
  }
  if (!auxtrace_offset.empty()) {
    feature_count++;
  }
  if (keep_failed_unwinding_debug_info_) {
    feature_count += 2;
  }
  if (etm_branch_list_generator_) {
    feature_count++;
  }
  if (!record_file_writer_->BeginWriteFeatures(feature_count)) {
    return false;
  }
  if (!DumpBuildIdFeature()) {
    return false;
  }
  if (!DumpFileFeature()) {
    return false;
  }
  utsname uname_buf;
  if (TEMP_FAILURE_RETRY(uname(&uname_buf)) != 0) {
    PLOG(ERROR) << "uname() failed";
    return false;
  }
  if (!record_file_writer_->WriteFeatureString(PerfFileFormat::FEAT_OSRELEASE, uname_buf.release)) {
    return false;
  }
  if (!record_file_writer_->WriteFeatureString(PerfFileFormat::FEAT_ARCH, uname_buf.machine)) {
    return false;
  }

  std::string exec_path = android::base::GetExecutablePath();
  if (exec_path.empty()) exec_path = "simpleperf";
  std::vector<std::string> cmdline;
  cmdline.push_back(exec_path);
  cmdline.push_back("record");
  cmdline.insert(cmdline.end(), args.begin(), args.end());
  if (!record_file_writer_->WriteCmdlineFeature(cmdline)) {
    return false;
  }
  if (branch_sampling_ != 0 && !record_file_writer_->WriteBranchStackFeature()) {
    return false;
  }
  if (!DumpMetaInfoFeature(kernel_symbols_available)) {
    return false;
  }
  if (!auxtrace_offset.empty() && !record_file_writer_->WriteAuxTraceFeature(auxtrace_offset)) {
    return false;
  }
  if (keep_failed_unwinding_debug_info_ && !DumpDebugUnwindFeature(debug_unwinding_files)) {
    return false;
  }
  if (etm_branch_list_generator_ && !DumpETMBranchListFeature()) {
    return false;
  }

  if (!record_file_writer_->EndWriteFeatures()) {
    return false;
  }
  return true;
}

bool RecordCommand::DumpBuildIdFeature() {
  std::vector<BuildIdRecord> build_id_records;
  BuildId build_id;
  std::vector<Dso*> dso_v = thread_tree_.GetAllDsos();
  for (Dso* dso : dso_v) {
    // For aux tracing, we don't know which binaries are traced.
    // So dump build ids for all binaries.
    if (!dso->HasDumpId() && !event_selection_set_.HasAuxTrace()) {
      continue;
    }
    if (GetBuildId(*dso, build_id)) {
      bool in_kernel = dso->type() == DSO_KERNEL || dso->type() == DSO_KERNEL_MODULE;
      build_id_records.emplace_back(in_kernel, UINT_MAX, build_id, dso->Path());
    }
  }
  if (!record_file_writer_->WriteBuildIdFeature(build_id_records)) {
    return false;
  }
  return true;
}

bool RecordCommand::DumpFileFeature() {
  std::vector<Dso*> dso_v = thread_tree_.GetAllDsos();
  // To parse ETM data for kernel modules, we need to dump memory addresses for kernel modules.
  if (event_selection_set_.HasAuxTrace() && !event_selection_set_.ExcludeKernel()) {
    for (Dso* dso : dso_v) {
      if (dso->type() == DSO_KERNEL_MODULE) {
        dso->CreateDumpId();
      }
    }
  }
  return record_file_writer_->WriteFileFeatures(dso_v);
}

bool RecordCommand::DumpMetaInfoFeature(bool kernel_symbols_available) {
  std::unordered_map<std::string, std::string> info_map = extra_meta_info_;
  info_map["simpleperf_version"] = GetSimpleperfVersion();
  info_map["system_wide_collection"] = system_wide_collection_ ? "true" : "false";
  info_map["trace_offcpu"] = trace_offcpu_ ? "true" : "false";
  // By storing event type information in perf.data, the readers of perf.data have the same
  // understanding of event types, even if they are on another machine.
  info_map["event_type_info"] = ScopedEventTypes::BuildString(event_selection_set_.GetEvents());
#if defined(__ANDROID__)
  info_map["product_props"] = android::base::StringPrintf(
      "%s:%s:%s", android::base::GetProperty("ro.product.manufacturer", "").c_str(),
      android::base::GetProperty("ro.product.model", "").c_str(),
      android::base::GetProperty("ro.product.name", "").c_str());
  info_map["android_version"] = android::base::GetProperty("ro.build.version.release", "");
  info_map["android_sdk_version"] = android::base::GetProperty("ro.build.version.sdk", "");
  info_map["android_build_type"] = android::base::GetProperty("ro.build.type", "");
  info_map["android_build_fingerprint"] = android::base::GetProperty("ro.build.fingerprint", "");
  utsname un;
  if (uname(&un) == 0) {
    info_map["kernel_version"] = un.release;
  }
  if (!app_package_name_.empty()) {
    info_map["app_package_name"] = app_package_name_;
    if (IsRoot()) {
      info_map["app_type"] = GetAppType(app_package_name_);
    }
  }
  if (event_selection_set_.HasAuxTrace()) {
    // Used by --exclude-perf in cmd_inject.cpp.
    info_map["recording_process"] = std::to_string(getpid());
  }
#endif
  info_map["clockid"] = clockid_;
  info_map["timestamp"] = std::to_string(time(nullptr));
  info_map["kernel_symbols_available"] = kernel_symbols_available ? "true" : "false";
  if (dwarf_callchain_sampling_ && !unwind_dwarf_callchain_) {
    OfflineUnwinder::CollectMetaInfo(&info_map);
  }
  auto record_stat = event_selection_set_.GetRecordStat();
  info_map["record_stat"] = android::base::StringPrintf(
      "sample_record_count=%" PRIu64
      ",kernelspace_lost_records=%zu,userspace_lost_samples=%zu,"
      "userspace_lost_non_samples=%zu,userspace_truncated_stack_samples=%zu",
      sample_record_count_, record_stat.kernelspace_lost_records,
      record_stat.userspace_lost_samples, record_stat.userspace_lost_non_samples,
      record_stat.userspace_truncated_stack_samples);

  return record_file_writer_->WriteMetaInfoFeature(info_map);
}

bool RecordCommand::DumpDebugUnwindFeature(const std::unordered_set<Dso*>& dso_set) {
  DebugUnwindFeature debug_unwind_feature;
  debug_unwind_feature.reserve(dso_set.size());
  for (const Dso* dso : dso_set) {
    if (dso->type() != DSO_ELF_FILE) {
      continue;
    }
    const std::string& filename = dso->GetDebugFilePath();
    std::unique_ptr<ElfFile> elf = ElfFile::Open(filename);
    if (elf) {
      llvm::MemoryBuffer* buffer = elf->GetMemoryBuffer();
      debug_unwind_feature.resize(debug_unwind_feature.size() + 1);
      auto& debug_unwind_file = debug_unwind_feature.back();
      debug_unwind_file.path = filename;
      debug_unwind_file.size = buffer->getBufferSize();
      if (!record_file_writer_->WriteFeature(PerfFileFormat::FEAT_DEBUG_UNWIND_FILE,
                                             buffer->getBufferStart(), buffer->getBufferSize())) {
        return false;
      }
    } else {
      LOG(WARNING) << "failed to keep " << filename << " in debug_unwind_feature section";
    }
  }
  return record_file_writer_->WriteDebugUnwindFeature(debug_unwind_feature);
}

void RecordCommand::CollectHitFileInfo(const SampleRecord& r, std::unordered_set<Dso*>* dso_set) {
  const ThreadEntry* thread = thread_tree_.FindThreadOrNew(r.tid_data.pid, r.tid_data.tid);
  size_t kernel_ip_count;
  std::vector<uint64_t> ips = r.GetCallChain(&kernel_ip_count);
  if ((r.sample_type & PERF_SAMPLE_BRANCH_STACK) != 0) {
    for (uint64_t i = 0; i < r.branch_stack_data.stack_nr; ++i) {
      const auto& item = r.branch_stack_data.stack[i];
      ips.push_back(item.from);
      ips.push_back(item.to);
    }
  }
  for (size_t i = 0; i < ips.size(); i++) {
    const MapEntry* map = thread_tree_.FindMap(thread, ips[i], i < kernel_ip_count);
    Dso* dso = map->dso;
    if (dump_symbols_) {
      const Symbol* symbol = thread_tree_.FindSymbol(map, ips[i], nullptr, &dso);
      if (!symbol->HasDumpId()) {
        dso->CreateSymbolDumpId(symbol);
      }
    }
    if (!dso->HasDumpId() && dso->type() != DSO_UNKNOWN_FILE) {
      dso->CreateDumpId();
    }
    if (dso_set != nullptr) {
      dso_set->insert(dso);
    }
  }
}

bool RecordCommand::DumpETMBranchListFeature() {
  ETMBinaryMap binary_map = etm_branch_list_generator_->GetETMBinaryMap();
  std::string s;
  if (!ETMBinaryMapToString(binary_map, s)) {
    return false;
  }
  return record_file_writer_->WriteFeature(PerfFileFormat::FEAT_ETM_BRANCH_LIST, s.data(),
                                           s.size());
}

}  // namespace

static bool ConsumeStr(const char*& p, const char* s) {
  if (strncmp(p, s, strlen(s)) == 0) {
    p += strlen(s);
    return true;
  }
  return false;
}

static bool ConsumeAddr(const char*& p, uint64_t* addr) {
  errno = 0;
  char* end;
  *addr = strtoull(p, &end, 0);
  if (errno == 0 && p != end) {
    p = end;
    return true;
  }
  return false;
}

// To reduce function length, not all format errors are checked.
static bool ParseOneAddrFilter(const std::string& s, std::vector<AddrFilter>* filters) {
  std::vector<std::string> args = android::base::Split(s, " ");
  if (args.size() != 2) {
    return false;
  }

  uint64_t addr1;
  uint64_t addr2;
  uint64_t off1;
  uint64_t off2;
  std::string path;

  if (auto p = s.data(); ConsumeStr(p, "start") && ConsumeAddr(p, &addr1)) {
    if (*p == '\0') {
      // start <kernel_addr>
      filters->emplace_back(AddrFilter::KERNEL_START, addr1, 0, "");
      return true;
    }
    if (ConsumeStr(p, "@") && *p != '\0') {
      // start <vaddr>@<file_path>
      if (auto elf = ElfFile::Open(p); elf && elf->VaddrToOff(addr1, &off1) && Realpath(p, &path)) {
        filters->emplace_back(AddrFilter::FILE_START, off1, 0, path);
        return true;
      }
    }
  }
  if (auto p = s.data(); ConsumeStr(p, "stop") && ConsumeAddr(p, &addr1)) {
    if (*p == '\0') {
      // stop <kernel_addr>
      filters->emplace_back(AddrFilter::KERNEL_STOP, addr1, 0, "");
      return true;
    }
    if (ConsumeStr(p, "@") && *p != '\0') {
      // stop <vaddr>@<file_path>
      if (auto elf = ElfFile::Open(p); elf && elf->VaddrToOff(addr1, &off1) && Realpath(p, &path)) {
        filters->emplace_back(AddrFilter::FILE_STOP, off1, 0, path);
        return true;
      }
    }
  }
  if (auto p = s.data(); ConsumeStr(p, "filter") && ConsumeAddr(p, &addr1) && ConsumeStr(p, "-") &&
                         ConsumeAddr(p, &addr2)) {
    if (*p == '\0') {
      // filter <kernel_addr_start>-<kernel_addr_end>
      filters->emplace_back(AddrFilter::KERNEL_RANGE, addr1, addr2 - addr1, "");
      return true;
    }
    if (ConsumeStr(p, "@") && *p != '\0') {
      // filter <vaddr_start>-<vaddr_end>@<file_path>
      if (auto elf = ElfFile::Open(p); elf && elf->VaddrToOff(addr1, &off1) &&
                                       elf->VaddrToOff(addr2, &off2) && Realpath(p, &path)) {
        filters->emplace_back(AddrFilter::FILE_RANGE, off1, off2 - off1, path);
        return true;
      }
    }
  }
  if (auto p = s.data(); ConsumeStr(p, "filter") && *p != '\0') {
    // filter <file_path>
    path = android::base::Trim(p);
    if (auto elf = ElfFile::Open(path); elf) {
      for (const ElfSegment& seg : elf->GetProgramHeader()) {
        if (seg.is_executable) {
          filters->emplace_back(AddrFilter::FILE_RANGE, seg.file_offset, seg.file_size, path);
        }
      }
      return true;
    }
  }
  return false;
}

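// Parse the value of the --addr-filter option: a comma-separated list of filters, each in one of
// the forms accepted by ParseOneAddrFilter() above. A hypothetical example:
//   --addr-filter "start 0x1000@/system/lib64/libc.so,filter /data/local/tmp/mybin"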
std::vector<AddrFilter> ParseAddrFilterOption(const std::string& s) {
  std::vector<AddrFilter> filters;
  for (const auto& str : android::base::Split(s, ",")) {
    if (!ParseOneAddrFilter(str, &filters)) {
      LOG(ERROR) << "failed to parse addr filter: " << str;
      return {};
    }
  }
  return filters;
}

void RegisterRecordCommand() {
  RegisterCommand("record", [] { return std::unique_ptr<Command>(new RecordCommand()); });
}

}  // namespace simpleperf