1 /*
2  * Copyright (C) 2021 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <bpf_timeinstate.h>
18 #include <gtest/gtest.h>
19 #include <test/mock_bpf_helpers.h>
20 
21 extern "C" {
22 
23 uint64_t* bpf_cpu_last_update_map_lookup_elem(uint32_t* zero);
24 uint64_t* bpf_uid_last_update_map_lookup_elem(uint32_t* uid);
25 int bpf_cpu_last_update_map_update_elem(uint32_t* zero, uint64_t* time, uint64_t flags);
26 int bpf_nr_active_map_update_elem(uint32_t* zero, uint32_t* time, uint64_t flags);
27 int bpf_cpu_policy_map_update_elem(uint32_t* zero, uint32_t* time, uint64_t flags);
28 int bpf_policy_freq_idx_map_update_elem(uint32_t* policy, uint8_t* index, uint64_t flags);
29 int bpf_policy_nr_active_map_update_elem(uint32_t* policy, uint32_t* active, uint64_t flags);
30 uint8_t* bpf_policy_freq_idx_map_lookup_elem(uint32_t* policy);
31 int bpf_policy_freq_idx_map_update_elem(uint32_t* policy, uint8_t* index, uint64_t flags);
32 int bpf_freq_to_idx_map_update_elem(freq_idx_key_t* freq_idx_key, uint8_t* index, uint64_t flags);
33 tis_val_t* bpf_uid_time_in_state_map_lookup_elem(time_key_t* key);
34 concurrent_val_t* bpf_uid_concurrent_times_map_lookup_elem(time_key_t* key);
35 int bpf_cpu_last_pid_map_update_elem(uint32_t* zero, pid_t* pid, uint64_t flags);
36 
37 struct switch_args {
38     unsigned long long ignore;
39     char prev_comm[16];
40     int prev_pid;
41     int prev_prio;
42     long long prev_state;
43     char next_comm[16];
44     int next_pid;
45     int next_prio;
46 };
47 
48 int tp_sched_switch(struct switch_args* args);
49 
50 struct cpufreq_args {
51     unsigned long long ignore;
52     unsigned int state;
53     unsigned int cpu_id;
54 };
55 
56 int tp_cpufreq(struct cpufreq_args* args);
57 
58 }  // extern "C"
59 
enableTracking()60 static void enableTracking() {
61     uint32_t zero = 0;
62     bpf_nr_active_map_update_elem(&zero, &zero, BPF_ANY);
63 }
64 
65 // Defines a CPU cluster <policy> containing CPUs <cpu_ids> with available frequencies
66 // <frequencies> and marks it as <active>
initCpuPolicy(uint32_t policy,std::vector<uint32_t> cpuIds,std::vector<uint32_t> frequencies,bool active)67 static void initCpuPolicy(uint32_t policy, std::vector<uint32_t> cpuIds,
68                           std::vector<uint32_t> frequencies, bool active) {
69     for (uint32_t cpuId : cpuIds) {
70         bpf_cpu_policy_map_update_elem(&cpuId, &policy, BPF_ANY);
71 
72         mock_bpf_set_smp_processor_id(cpuId);
73 
74         // Initialize time - this must be done per-CPU
75         uint32_t zero = 0;
76         uint64_t time = 0;
77         bpf_cpu_last_update_map_update_elem(&zero, &time, BPF_ANY);
78 
79         pid_t pid = 0;
80         bpf_cpu_last_pid_map_update_elem(&zero, &pid, BPF_ANY);
81     }
82     for (uint8_t i = 0; i < frequencies.size(); i++) {
83         uint8_t index = i + 1;  // Frequency indexes start with 1
84         freq_idx_key_t freqIdxKey{.policy = policy, .freq = frequencies[i]};
85         bpf_freq_to_idx_map_update_elem(&freqIdxKey, &index, BPF_ANY);
86     }
87     if (active) {
88         uint32_t zero = 0;
89         bpf_policy_nr_active_map_update_elem(&policy, &zero, BPF_ANY);
90     }
91 }
92 
noteCpuFrequencyChange(uint32_t cpuId,uint32_t frequency)93 static void noteCpuFrequencyChange(uint32_t cpuId, uint32_t frequency) {
94     cpufreq_args args{.cpu_id = cpuId, .state = frequency};
95     int ret = tp_cpufreq(&args);  // Tracepoint event power/cpu_frequency
96     ASSERT_EQ(1, ret);
97 }
98 
noteSchedSwitch(pid_t prevPid,pid_t nextPid)99 static void noteSchedSwitch(pid_t prevPid, pid_t nextPid) {
100     switch_args args{.prev_pid = prevPid, .next_pid = nextPid};
101     int ret = tp_sched_switch(&args);  // Tracepoint event sched/sched_switch
102     ASSERT_EQ(1, ret);
103 }
104 
assertTimeInState(uint32_t uid,uint32_t bucket,std::vector<uint64_t> expectedTimeInState)105 static void assertTimeInState(uint32_t uid, uint32_t bucket,
106                               std::vector<uint64_t> expectedTimeInState) {
107     time_key_t timeKey{.uid = uid, .bucket = bucket};
108     tis_val_t* value = bpf_uid_time_in_state_map_lookup_elem(&timeKey);
109     ASSERT_TRUE(value);
110 
111     for (int i = 0; i < FREQS_PER_ENTRY; i++) {
112         if (i < expectedTimeInState.size()) {
113             ASSERT_EQ(expectedTimeInState[i], value->ar[i]);
114         } else {
115             ASSERT_EQ(0, value->ar[i]);
116         }
117     }
118 }
119 
assertConcurrentTimes(uint32_t uid,uint32_t bucket,std::vector<uint64_t> expectedPolicy,std::vector<uint64_t> expectedActive)120 static void assertConcurrentTimes(uint32_t uid, uint32_t bucket,
121                                   std::vector<uint64_t> expectedPolicy,
122                                   std::vector<uint64_t> expectedActive) {
123     time_key_t timeKey{.uid = uid, .bucket = bucket};
124     concurrent_val_t* value = bpf_uid_concurrent_times_map_lookup_elem(&timeKey);
125     ASSERT_TRUE(value);
126 
127     for (int i = 0; i < CPUS_PER_ENTRY; i++) {
128         if (i < expectedPolicy.size()) {
129             ASSERT_EQ(expectedPolicy[i], value->policy[i]);
130         } else {
131             ASSERT_EQ(0, value->policy[i]);
132         }
133     }
134 
135     for (int i = 0; i < CPUS_PER_ENTRY; i++) {
136         if (i < expectedActive.size()) {
137             ASSERT_EQ(expectedActive[i], value->active[i]);
138         } else {
139             ASSERT_EQ(0, value->active[i]);
140         }
141     }
142 }
143 
assertUidLastUpdateTime(uint32_t uid,uint64_t expectedTime)144 static void assertUidLastUpdateTime(uint32_t uid, uint64_t expectedTime) {
145     uint64_t* value = bpf_uid_last_update_map_lookup_elem(&uid);
146     ASSERT_TRUE(value);
147     ASSERT_EQ(expectedTime, *value);
148 }
149 
// Verifies that a cpu_frequency event updates the frequency index of the
// policy (cluster) owning that CPU.
TEST(time_in_state, tp_cpufreq) {
    // Two clusters: cluster 0 = CPUs {0,1,2}, cluster 1 = CPUs {3,4}.
    initCpuPolicy(0, {0, 1, 2}, {1000, 2000}, true);
    initCpuPolicy(1, {3, 4}, {3000, 4000, 5000}, true);

    noteCpuFrequencyChange(1, 2000);
    {
        uint32_t policy = 0;  // CPU 1 belongs to Cluster 0
        uint8_t* freqIndex = bpf_policy_freq_idx_map_lookup_elem(&policy);
        ASSERT_TRUE(freqIndex);
        // Freq idx starts with 1. Cluster 0 is now running at the _second_ frequency
        ASSERT_EQ(2, *freqIndex);
    }

    noteCpuFrequencyChange(4, 5000);
    {
        uint32_t policy = 1;  // CPU 4 belongs to Cluster 1
        uint8_t* freqIndex = bpf_policy_freq_idx_map_lookup_elem(&policy);
        ASSERT_TRUE(freqIndex);
        // Freq idx starts with 1. Cluster 1 is now running at the _third_ frequency
        ASSERT_EQ(3, *freqIndex);
    }
}
172 
// Verifies time-in-state and concurrent-time accounting across sched_switch
// events for two uids running on CPUs in different clusters.
TEST(time_in_state, tp_sched_switch) {
    mock_bpf_set_ktime_ns(1000);
    mock_bpf_set_current_uid_gid(42);

    initCpuPolicy(0, {0, 1, 2}, {1000, 2000}, true);
    initCpuPolicy(1, {3, 4}, {3000, 4000, 5000}, true);

    enableTracking();

    mock_bpf_set_smp_processor_id(2);  // CPU 2 is in cluster 0

    // First call is ignored, because there is no "delta" to be computed
    noteSchedSwitch(0, 100);

    noteCpuFrequencyChange(2, 1000);  // Cluster 0 at its first frequency

    mock_bpf_set_ktime_ns(1314);

    noteSchedSwitch(100, 200);

    // 1314 - 1000 = 314
    assertTimeInState(42, 0, {314, 0});
    // Single CPU busy for the whole interval: both the per-policy slot and the
    // total-active slot for "1 CPU" get the full 314 ns.
    assertConcurrentTimes(42, 0, {314, 0, 0, 0, 0}, {314, 0, 0, 0, 0});

    mock_bpf_set_current_uid_gid(51);
    mock_bpf_set_smp_processor_id(3);  // CPU 3 is in cluster 1

    // First call on this CPU is also ignored
    noteSchedSwitch(200, 300);

    mock_bpf_set_ktime_ns(2718);

    noteCpuFrequencyChange(3, 5000);
    noteSchedSwitch(300, 400);

    mock_bpf_set_ktime_ns(5859);

    noteCpuFrequencyChange(3, 4000);
    noteSchedSwitch(400, 500);

    // 5859-2718 ns accrued after the change to 4000 (second frequency),
    // 2718-1314 ns after the change to 5000 (third frequency).
    assertTimeInState(51, 0, {0, 5859 - 2718, 2718 - 1314});

    // (2718-1314)+(5859-2718) = 4545
    assertConcurrentTimes(51, 0, {4545, 0, 0, 0, 0}, {0, 4545, 0, 0, 0});

    assertUidLastUpdateTime(42, 1314);
    assertUidLastUpdateTime(51, 5859);
}
221 
// Verifies concurrent-time accounting on a single-CPU cluster across a short
// sequence of sched_switch events.
TEST(time_in_state, tp_sched_switch_active_cpus) {
    mock_bpf_set_ktime_ns(1000);
    mock_bpf_set_current_uid_gid(42);

    // One cluster with a single CPU.
    initCpuPolicy(0, {0}, {1000, 2000}, true);

    enableTracking();

    mock_bpf_set_smp_processor_id(0);

    // First switch on the CPU only seeds the timestamp; no delta yet.
    noteSchedSwitch(0, 1);

    mock_bpf_set_ktime_ns(1100);

    noteSchedSwitch(0, 1);

    mock_bpf_set_ktime_ns(1200);

    noteSchedSwitch(1, 2);

    // Net result: 100 ns credited to uid 42 with one CPU active
    // (only one of the two 100 ns intervals counts — TODO(review): confirm
    // which interval the bpf program attributes, given prev_pid == 0 above).
    assertConcurrentTimes(42, 0, {100}, {100});
}
244 
// Verifies that time run by an SDK-sandbox uid is credited to both the
// corresponding app uid bucket and the AID_SDK_SANDBOX aggregate bucket.
TEST(time_in_state, tp_sched_switch_sdk_sandbox) {
    mock_bpf_set_ktime_ns(1000);
    // Run as the first uid in the SDK-sandbox range.
    mock_bpf_set_current_uid_gid(AID_SDK_SANDBOX_PROCESS_START);

    initCpuPolicy(0, {0}, {1000, 2000}, true);

    enableTracking();

    mock_bpf_set_smp_processor_id(0);

    noteSchedSwitch(0, 1);

    mock_bpf_set_ktime_ns(1100);

    noteSchedSwitch(1, 2);

    // The same 100 ns appears under both the app uid and the sandbox aggregate.
    assertTimeInState(AID_APP_START, 0, {100, 0});
    assertTimeInState(AID_SDK_SANDBOX, 0, {100, 0});

    assertConcurrentTimes(AID_APP_START, 0, {100}, {100});
    assertConcurrentTimes(AID_SDK_SANDBOX, 0, {100}, {100});
}
267