1 /*
2 * Copyright (C) 2019, The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include <aidl/android/os/BnPullAtomCallback.h>
#include <aidl/android/os/IPullAtomResultReceiver.h>
#include <aidl/android/os/IStatsd.h>
#include <aidl/android/util/StatsEventParcel.h>
#include <android/binder_auto_utils.h>
#include <android/binder_ibinder.h>
#include <android/binder_manager.h>
#include <stats_event.h>
#include <stats_pull_atom_callback.h>

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
31
32 using Status = ::ndk::ScopedAStatus;
33 using aidl::android::os::BnPullAtomCallback;
34 using aidl::android::os::IPullAtomResultReceiver;
35 using aidl::android::os::IStatsd;
36 using aidl::android::util::StatsEventParcel;
37 using ::ndk::SharedRefBase;
38
// Opaque list of events produced by one pull. Clients append to it only via
// AStatsEventList_addStatsEvent(); the events it holds are released by
// StatsPullAtomCallbackInternal::onPullAtom() after the pull completes.
struct AStatsEventList {
    std::vector<AStatsEvent*> data;
};
42
AStatsEventList_addStatsEvent(AStatsEventList * pull_data)43 AStatsEvent* AStatsEventList_addStatsEvent(AStatsEventList* pull_data) {
44 AStatsEvent* event = AStatsEvent_obtain();
45 pull_data->data.push_back(event);
46 return event;
47 }
48
// Defaults used when a client registers a puller without explicit metadata.
constexpr int64_t DEFAULT_COOL_DOWN_MILLIS = 1000LL;  // 1 second.
constexpr int64_t DEFAULT_TIMEOUT_MILLIS = 1500LL;    // 1.5 seconds.

// Opaque pull-registration parameters, manipulated only through the
// AStatsManager_PullAtomMetadata_* accessors below and forwarded to statsd in
// registerNativePullAtomCallback().
struct AStatsManager_PullAtomMetadata {
    int64_t cool_down_millis;
    int64_t timeout_millis;
    std::vector<int32_t> additive_fields;
};
57
AStatsManager_PullAtomMetadata_obtain()58 AStatsManager_PullAtomMetadata* AStatsManager_PullAtomMetadata_obtain() {
59 AStatsManager_PullAtomMetadata* metadata = new AStatsManager_PullAtomMetadata();
60 metadata->cool_down_millis = DEFAULT_COOL_DOWN_MILLIS;
61 metadata->timeout_millis = DEFAULT_TIMEOUT_MILLIS;
62 metadata->additive_fields = std::vector<int32_t>();
63 return metadata;
64 }
65
// Frees metadata allocated by AStatsManager_PullAtomMetadata_obtain().
// Passing nullptr is safe (delete on null is a no-op).
void AStatsManager_PullAtomMetadata_release(AStatsManager_PullAtomMetadata* metadata) {
    delete metadata;
}
69
// Overrides the default cool-down interval stored in metadata.
void AStatsManager_PullAtomMetadata_setCoolDownMillis(AStatsManager_PullAtomMetadata* metadata,
                                                      int64_t cool_down_millis) {
    metadata->cool_down_millis = cool_down_millis;
}
74
// Returns the cool-down interval currently stored in metadata.
int64_t AStatsManager_PullAtomMetadata_getCoolDownMillis(AStatsManager_PullAtomMetadata* metadata) {
    return metadata->cool_down_millis;
}
78
// Overrides the default pull timeout stored in metadata.
void AStatsManager_PullAtomMetadata_setTimeoutMillis(AStatsManager_PullAtomMetadata* metadata,
                                                     int64_t timeout_millis) {
    metadata->timeout_millis = timeout_millis;
}
83
// Returns the pull timeout currently stored in metadata.
int64_t AStatsManager_PullAtomMetadata_getTimeoutMillis(AStatsManager_PullAtomMetadata* metadata) {
    return metadata->timeout_millis;
}
87
AStatsManager_PullAtomMetadata_setAdditiveFields(AStatsManager_PullAtomMetadata * metadata,int32_t * additive_fields,int32_t num_fields)88 void AStatsManager_PullAtomMetadata_setAdditiveFields(AStatsManager_PullAtomMetadata* metadata,
89 int32_t* additive_fields,
90 int32_t num_fields) {
91 metadata->additive_fields.assign(additive_fields, additive_fields + num_fields);
92 }
93
AStatsManager_PullAtomMetadata_getNumAdditiveFields(AStatsManager_PullAtomMetadata * metadata)94 int32_t AStatsManager_PullAtomMetadata_getNumAdditiveFields(
95 AStatsManager_PullAtomMetadata* metadata) {
96 return metadata->additive_fields.size();
97 }
98
AStatsManager_PullAtomMetadata_getAdditiveFields(AStatsManager_PullAtomMetadata * metadata,int32_t * fields)99 void AStatsManager_PullAtomMetadata_getAdditiveFields(AStatsManager_PullAtomMetadata* metadata,
100 int32_t* fields) {
101 std::copy(metadata->additive_fields.begin(), metadata->additive_fields.end(), fields);
102 }
103
104 class StatsPullAtomCallbackInternal : public BnPullAtomCallback {
105 public:
StatsPullAtomCallbackInternal(const AStatsManager_PullAtomCallback callback,void * cookie,const int64_t coolDownMillis,const int64_t timeoutMillis,const std::vector<int32_t> additiveFields)106 StatsPullAtomCallbackInternal(const AStatsManager_PullAtomCallback callback, void* cookie,
107 const int64_t coolDownMillis, const int64_t timeoutMillis,
108 const std::vector<int32_t> additiveFields)
109 : mCallback(callback),
110 mCookie(cookie),
111 mCoolDownMillis(coolDownMillis),
112 mTimeoutMillis(timeoutMillis),
113 mAdditiveFields(additiveFields) {}
114
onPullAtom(int32_t atomTag,const std::shared_ptr<IPullAtomResultReceiver> & resultReceiver)115 Status onPullAtom(int32_t atomTag,
116 const std::shared_ptr<IPullAtomResultReceiver>& resultReceiver) override {
117 AStatsEventList statsEventList;
118 int successInt = mCallback(atomTag, &statsEventList, mCookie);
119 bool success = successInt == AStatsManager_PULL_SUCCESS;
120
121 // Convert stats_events into StatsEventParcels.
122 std::vector<StatsEventParcel> parcels;
123
124 for (int i = 0; i < statsEventList.data.size(); i++) {
125 size_t size;
126 uint8_t* buffer = AStatsEvent_getBuffer(statsEventList.data[i], &size);
127
128 StatsEventParcel p;
129 // vector.assign() creates a copy, but this is inevitable unless
130 // stats_event.h/c uses a vector as opposed to a buffer.
131 p.buffer.assign(buffer, buffer + size);
132 parcels.push_back(std::move(p));
133 }
134
135 Status status = resultReceiver->pullFinished(atomTag, success, parcels);
136 if (!status.isOk()) {
137 std::vector<StatsEventParcel> emptyParcels;
138 resultReceiver->pullFinished(atomTag, /*success=*/false, emptyParcels);
139 }
140 for (int i = 0; i < statsEventList.data.size(); i++) {
141 AStatsEvent_release(statsEventList.data[i]);
142 }
143 return Status::ok();
144 }
145
getCoolDownMillis() const146 int64_t getCoolDownMillis() const { return mCoolDownMillis; }
getTimeoutMillis() const147 int64_t getTimeoutMillis() const { return mTimeoutMillis; }
getAdditiveFields() const148 const std::vector<int32_t>& getAdditiveFields() const { return mAdditiveFields; }
149
150 private:
151 const AStatsManager_PullAtomCallback mCallback;
152 void* mCookie;
153 const int64_t mCoolDownMillis;
154 const int64_t mTimeoutMillis;
155 const std::vector<int32_t> mAdditiveFields;
156 };
157
/**
 * @brief pullersMutex is used to guard simultaneous access to pullers from below threads
 * Main thread
 * - AStatsManager_setPullAtomCallback()
 * - AStatsManager_clearPullAtomCallback()
 * Binder thread:
 * - StatsdProvider::binderDied()
 */
static std::mutex pullersMutex;

// All currently registered pullers, keyed by atom tag. Entries are kept even
// while statsd is down so binderDied() can re-register them when it returns.
// Guarded by pullersMutex.
static std::map<int32_t, std::shared_ptr<StatsPullAtomCallbackInternal>> pullers;
169
/**
 * Lazily connects to the statsd binder service and caches the proxy.
 * A death recipient is linked on first fetch so that a statsd restart
 * triggers re-registration of all known pullers (see binderDied()).
 */
class StatsdProvider {
public:
    StatsdProvider() : mDeathRecipient(AIBinder_DeathRecipient_new(binderDied)) {
    }

    ~StatsdProvider() {
        resetStatsService();
    }

    /**
     * Returns the cached IStatsd proxy, fetching it (and linking the death
     * recipient) on first use. Returns nullptr when statsd is unavailable.
     */
    std::shared_ptr<IStatsd> getStatsService() {
        // There are host unit tests which are using libstatspull
        // Since we do not have statsd on host - the getStatsService() is no-op and
        // should return nullptr
#ifdef __ANDROID__
        std::lock_guard<std::mutex> lock(mStatsdMutex);
        if (!mStatsd) {
            // Fetch statsd
            ::ndk::SpAIBinder binder(AServiceManager_getService("stats"));
            mStatsd = IStatsd::fromBinder(binder);
            if (mStatsd) {
                // `this` is passed as the death-recipient cookie and recovered
                // in binderDied().
                AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(), this);
            }
        }
#endif  // __ANDROID__
        return mStatsd;
    }

    // Drops the cached proxy so the next getStatsService() re-fetches it.
    void resetStatsService() {
        std::lock_guard<std::mutex> lock(mStatsdMutex);
        mStatsd = nullptr;
    }

    /**
     * Death-recipient entry point, invoked on a binder thread when statsd
     * dies. Clears the stale proxy, re-fetches the service, and re-registers
     * every puller recorded in the global map.
     */
    static void binderDied(void* cookie) {
        StatsdProvider* statsProvider = static_cast<StatsdProvider*>(cookie);
        statsProvider->resetStatsService();

        std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
        if (statsService == nullptr) {
            return;
        }

        // Since we do not want to make an IPC with the lock held, we first create a
        // copy of the data with the lock held before iterating through the map.
        std::map<int32_t, std::shared_ptr<StatsPullAtomCallbackInternal>> pullersCopy;
        {
            std::lock_guard<std::mutex> lock(pullersMutex);
            pullersCopy = pullers;
        }
        for (const auto& it : pullersCopy) {
            statsService->registerNativePullAtomCallback(it.first, it.second->getCoolDownMillis(),
                                                         it.second->getTimeoutMillis(),
                                                         it.second->getAdditiveFields(), it.second);
        }
    }

private:
    /**
     * @brief mStatsdMutex is used to guard simultaneous access to mStatsd from below threads:
     * Work thread
     * - registerStatsPullAtomCallbackBlocking()
     * - unregisterStatsPullAtomCallbackBlocking()
     * Binder thread:
     * - StatsdProvider::binderDied()
     */
    std::mutex mStatsdMutex;
    std::shared_ptr<IStatsd> mStatsd;
    ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
};
238
// Process-wide provider instance; held by shared_ptr because the worker
// thread receives a copy and may outlive callers of the public API.
static std::shared_ptr<StatsdProvider> statsProvider = std::make_shared<StatsdProvider>();
240
registerStatsPullAtomCallbackBlocking(int32_t atomTag,std::shared_ptr<StatsdProvider> statsProvider,std::shared_ptr<StatsPullAtomCallbackInternal> cb)241 void registerStatsPullAtomCallbackBlocking(int32_t atomTag,
242 std::shared_ptr<StatsdProvider> statsProvider,
243 std::shared_ptr<StatsPullAtomCallbackInternal> cb) {
244 const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
245 if (statsService == nullptr) {
246 // Statsd not available
247 return;
248 }
249
250 statsService->registerNativePullAtomCallback(
251 atomTag, cb->getCoolDownMillis(), cb->getTimeoutMillis(), cb->getAdditiveFields(), cb);
252 }
253
unregisterStatsPullAtomCallbackBlocking(int32_t atomTag,std::shared_ptr<StatsdProvider> statsProvider)254 void unregisterStatsPullAtomCallbackBlocking(int32_t atomTag,
255 std::shared_ptr<StatsdProvider> statsProvider) {
256 const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
257 if (statsService == nullptr) {
258 // Statsd not available
259 return;
260 }
261
262 statsService->unregisterNativePullAtomCallback(atomTag);
263 }
264
265 class CallbackOperationsHandler {
266 struct Cmd {
267 enum Type { CMD_REGISTER, CMD_UNREGISTER };
268
269 Type type;
270 int atomTag;
271 std::shared_ptr<StatsPullAtomCallbackInternal> callback;
272 };
273
274 public:
~CallbackOperationsHandler()275 ~CallbackOperationsHandler() {
276 if (mWorkThread.joinable()) {
277 mWorkThread.join();
278 }
279 }
280
getInstance()281 static CallbackOperationsHandler& getInstance() {
282 static CallbackOperationsHandler handler;
283 return handler;
284 }
285
registerCallback(int atomTag,std::shared_ptr<StatsPullAtomCallbackInternal> callback)286 void registerCallback(int atomTag, std::shared_ptr<StatsPullAtomCallbackInternal> callback) {
287 auto registerCmd = std::make_unique<Cmd>();
288 registerCmd->type = Cmd::CMD_REGISTER;
289 registerCmd->atomTag = atomTag;
290 registerCmd->callback = std::move(callback);
291 pushToQueue(std::move(registerCmd));
292
293 startWorkerThread();
294 }
295
unregisterCallback(int atomTag)296 void unregisterCallback(int atomTag) {
297 auto unregisterCmd = std::make_unique<Cmd>();
298 unregisterCmd->type = Cmd::CMD_UNREGISTER;
299 unregisterCmd->atomTag = atomTag;
300 pushToQueue(std::move(unregisterCmd));
301
302 startWorkerThread();
303 }
304
305 private:
306 std::atomic_bool mThreadAlive = false;
307 std::thread mWorkThread;
308
309 std::mutex mMutex;
310 std::queue<std::unique_ptr<Cmd>> mCmdQueue;
311
CallbackOperationsHandler()312 CallbackOperationsHandler() {
313 }
314
pushToQueue(std::unique_ptr<Cmd> cmd)315 void pushToQueue(std::unique_ptr<Cmd> cmd) {
316 std::unique_lock<std::mutex> lock(mMutex);
317 mCmdQueue.push(std::move(cmd));
318 }
319
startWorkerThread()320 void startWorkerThread() {
321 // Only spawn one thread to manage requests
322 if (mThreadAlive) {
323 return;
324 }
325 mThreadAlive = true;
326 if (mWorkThread.joinable()) {
327 mWorkThread.join();
328 }
329 mWorkThread = std::thread(&CallbackOperationsHandler::processCommands, this, statsProvider);
330 }
331
processCommands(std::shared_ptr<StatsdProvider> statsProvider)332 void processCommands(std::shared_ptr<StatsdProvider> statsProvider) {
333 /**
334 * First trying to obtain stats service instance
335 * This is a blocking call, which waits on service readiness
336 */
337 const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
338
339 if (!statsService) {
340 // Statsd not available - dropping all submitted command requests
341 std::queue<std::unique_ptr<Cmd>> emptyQueue;
342 std::unique_lock<std::mutex> lock(mMutex);
343 mCmdQueue.swap(emptyQueue);
344 mThreadAlive = false;
345 return;
346 }
347
348 while (true) {
349 std::unique_ptr<Cmd> cmd = nullptr;
350 {
351 /**
352 * To guarantee sequential commands processing we need to lock mutex queue
353 */
354 std::unique_lock<std::mutex> lock(mMutex);
355 if (mCmdQueue.empty()) {
356 mThreadAlive = false;
357 return;
358 }
359
360 cmd = std::move(mCmdQueue.front());
361 mCmdQueue.pop();
362 }
363
364 switch (cmd->type) {
365 case Cmd::CMD_REGISTER: {
366 registerStatsPullAtomCallbackBlocking(cmd->atomTag, statsProvider,
367 cmd->callback);
368 break;
369 }
370 case Cmd::CMD_UNREGISTER: {
371 unregisterStatsPullAtomCallbackBlocking(cmd->atomTag, statsProvider);
372 break;
373 }
374 }
375 }
376 }
377 };
378
AStatsManager_setPullAtomCallback(int32_t atom_tag,AStatsManager_PullAtomMetadata * metadata,AStatsManager_PullAtomCallback callback,void * cookie)379 void AStatsManager_setPullAtomCallback(int32_t atom_tag, AStatsManager_PullAtomMetadata* metadata,
380 AStatsManager_PullAtomCallback callback, void* cookie) {
381 int64_t coolDownMillis =
382 metadata == nullptr ? DEFAULT_COOL_DOWN_MILLIS : metadata->cool_down_millis;
383 int64_t timeoutMillis = metadata == nullptr ? DEFAULT_TIMEOUT_MILLIS : metadata->timeout_millis;
384
385 std::vector<int32_t> additiveFields;
386 if (metadata != nullptr) {
387 additiveFields = metadata->additive_fields;
388 }
389
390 std::shared_ptr<StatsPullAtomCallbackInternal> callbackBinder =
391 SharedRefBase::make<StatsPullAtomCallbackInternal>(callback, cookie, coolDownMillis,
392 timeoutMillis, additiveFields);
393
394 {
395 std::lock_guard<std::mutex> lock(pullersMutex);
396 // Always add to the map. If statsd is dead, we will add them when it comes back.
397 pullers[atom_tag] = callbackBinder;
398 }
399
400 CallbackOperationsHandler::getInstance().registerCallback(atom_tag, callbackBinder);
401 }
402
AStatsManager_clearPullAtomCallback(int32_t atom_tag)403 void AStatsManager_clearPullAtomCallback(int32_t atom_tag) {
404 {
405 std::lock_guard<std::mutex> lock(pullersMutex);
406 // Always remove the puller from our map.
407 // If statsd is down, we will not register it when it comes back.
408 pullers.erase(atom_tag);
409 }
410
411 CallbackOperationsHandler::getInstance().unregisterCallback(atom_tag);
412 }
413