/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */

//#define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include "BlobCache.h"

#include <android-base/properties.h>
#include <errno.h>
#include <inttypes.h>
#include <log/log.h>
#include <utils/Trace.h>

#include <chrono>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

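// Seed the pseudo-random state used by clean() to pick eviction victims; the
// seed only needs to differ between runs, so the steady_clock tick count is
// enough.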
BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize)
      : mMaxTotalSize(maxTotalSize),
        mMaxKeySize(maxKeySize),
        mMaxValueSize(maxValueSize),
        mTotalSize(0) {
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    srand(now);
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    ALOGV("initializing random seed using %llu", (unsigned long long)now);
}

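// Insert or update a key/value pair. Entries are kept sorted by key so that
// lookups can use binary search; if the insertion would push the cache over
// mMaxTotalSize, clean() evicts entries and the insertion is retried.
//
// Illustrative usage (the size limits here are arbitrary example values):
//
//   BlobCache cache(64, 4096, 64 * 1024);  // maxKeySize, maxValueSize, maxTotalSize
//   const char k[] = "shader-key";
//   const char v[] = "compiled-binary";
//   BlobCache::InsertResult r = cache.set(k, sizeof(k), v, sizeof(v));
//   // r is kInserted on success, or kDidClean if entries had to be evicted first.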
BlobCache::InsertResult BlobCache::set(const void* key, size_t keySize, const void* value,
                                       size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)", keySize,
              mMaxKeySize);
        return InsertResult::kKeyTooBig;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)", valueSize,
              mMaxValueSize);
        return InsertResult::kValueTooBig;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
              "large: %zu (limit: %zu)",
              keySize + valueSize, mMaxTotalSize);
        return InsertResult::kCombinedTooBig;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return InsertResult::kInvalidKeySize;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return InsertResult::kInvalidValueSize;
    }

    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);

    bool didClean = false;
    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
        if (index == mCacheEntries.end() || cacheEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newTotalSize = mTotalSize + keySize + valueSize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    didClean = true;
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                          "total cache size limit would be exceeded: %zu "
                          "(limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    return InsertResult::kNotEnoughSpace;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value", keySize,
                  valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    didClean = true;
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                          "size limit would be exceeded: %zu (limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    return InsertResult::kNotEnoughSpace;
                }
            }
            index->setValue(valueBlob);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                  "value",
                  keySize, valueSize);
        }
        return didClean ? InsertResult::kDidClean : InsertResult::kInserted;
    }
}

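// Look up a key and copy its value into the caller's buffer. Returns the size
// of the stored value, or 0 if the key is not present. If the buffer is too
// small, nothing is copied, but the required size is still returned.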
size_t BlobCache::get(const void* key, size_t keySize, void* value, size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)", keySize,
              mMaxKeySize);
        return 0;
    }
    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
    if (index == mCacheEntries.end() || cacheEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        return 0;
    }

    // The key was found. Return the value if the caller's buffer is large
    // enough.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    if (valueBlobSize <= valueSize) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(value, valueBlob->getData(), valueBlobSize);
    } else {
        ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)", valueSize,
              valueBlobSize);
    }
    return valueBlobSize;
}

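// Round a size up to the next multiple of 4 so that serialized cache entries
// stay 4-byte aligned.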
static inline size_t align4(size_t size) {
    return (size + 3) & ~3;
}

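// Compute the buffer size flatten() needs: the header (including the build id)
// followed by one 4-byte-aligned entry record per cache entry.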
size_t BlobCache::getFlattenedSize() const {
    auto buildId = base::GetProperty("ro.build.id", "");
    size_t size = align4(sizeof(Header) + buildId.size());
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

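// Serialize the cache into the caller-provided buffer using the same layout
// that unflatten() parses: a Header followed by aligned EntryHeader records.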
int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return -EINVAL;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    auto buildId = base::GetProperty("ro.build.id", "");
    header->mBuildIdLength = buildId.size();
    memcpy(header->mBuildId, buildId.c_str(), header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }

        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // We have padding bytes. Those will get written to storage, and contribute to the
            // CRC, so make sure we zero them to have reproducible results.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}

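// Repopulate the cache from a buffer previously produced by flatten(). The
// cache is cleared first, and a version or build-id mismatch simply leaves the
// cache empty rather than failing.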
int BlobCache::unflatten(void const* buffer, size_t size) {
    ATRACE_NAME("BlobCache::unflatten");

    // All errors should result in the BlobCache being in an empty state.
    clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    auto buildId = base::GetProperty("ro.build.id", "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
        header->mDeviceVersion != blobCacheDeviceVersion ||
        buildId.size() != header->mBuildIdLength ||
        strncmp(buildId.c_str(), header->mBuildId, buildId.size())) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(&byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            clear();
            ALOGE("unflatten: not enough room for cache entry data");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}

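// Return a pseudo-random number for eviction, using the per-instance nrand48()
// state (or rand() on Windows, which lacks nrand48()).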
long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

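// Evict randomly chosen entries until the cache occupies no more than half of
// mMaxTotalSize, leaving room for new insertions.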
void BlobCache::clean() {
    ATRACE_NAME("BlobCache::clean");

    // Remove a random cache entry until the total cache size gets below half
    // the maximum total cache size.
    while (mTotalSize > mMaxTotalSize / 2) {
        size_t i = size_t(blob_random() % (mCacheEntries.size()));
        const CacheEntry& entry(mCacheEntries[i]);
        mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
        mCacheEntries.erase(mCacheEntries.begin() + i);
    }
}

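// The cache is cleanable if clean() could actually free space, i.e. it is more
// than half full.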
bool BlobCache::isCleanable() const {
    return mTotalSize > mMaxTotalSize / 2;
}

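// A Blob either copies the caller's data and owns the copy, or merely wraps
// the caller's pointer (as is done for the transient keys used during lookup).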
BlobCache::Blob::Blob(const void* data, size_t size, bool copyData)
      : mData(copyData ? malloc(size) : data), mSize(size), mOwnsData(copyData) {
    if (data != nullptr && copyData) {
        memcpy(const_cast<void*>(mData), data, size);
    }
}

BlobCache::Blob::~Blob() {
    if (mOwnsData) {
        free(const_cast<void*>(mData));
    }
}

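// Blobs are ordered by size first and by memcmp() of their contents second,
// which provides the strict weak ordering required by std::lower_bound.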
bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry() {}

BlobCache::CacheEntry::CacheEntry(const std::shared_ptr<Blob>& key,
                                  const std::shared_ptr<Blob>& value)
      : mKey(key), mValue(value) {}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce) : mKey(ce.mKey), mValue(ce.mValue) {}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

} // namespace android