/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "image.h"

#include <lz4.h>
#include <lz4hc.h>
#include <sstream>
#include <sys/stat.h>
#include <zlib.h>

#include "android-base/stringprintf.h"

#include "base/bit_utils.h"
#include "base/length_prefixed_array.h"
#include "base/utils.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_array.h"

namespace art HIDDEN {

const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
// Last change: Add unsignedRemainder intrinsics.
const uint8_t ImageHeader::kImageVersion[] = { '1', '1', '1', '\0' };

ImageHeader::ImageHeader(uint32_t image_reservation_size,
                         uint32_t component_count,
                         uint32_t image_begin,
                         uint32_t image_size,
                         ImageSection* sections,
                         uint32_t image_roots,
                         uint32_t oat_checksum,
                         uint32_t oat_file_begin,
                         uint32_t oat_data_begin,
                         uint32_t oat_data_end,
                         uint32_t oat_file_end,
                         uint32_t boot_image_begin,
                         uint32_t boot_image_size,
                         uint32_t boot_image_component_count,
                         uint32_t boot_image_checksum,
                         PointerSize pointer_size)
  : image_reservation_size_(image_reservation_size),
    component_count_(component_count),
    image_begin_(image_begin),
    image_size_(image_size),
    image_checksum_(0u),
    oat_checksum_(oat_checksum),
    oat_file_begin_(oat_file_begin),
    oat_data_begin_(oat_data_begin),
    oat_data_end_(oat_data_end),
    oat_file_end_(oat_file_end),
    boot_image_begin_(boot_image_begin),
    boot_image_size_(boot_image_size),
    boot_image_component_count_(boot_image_component_count),
    boot_image_checksum_(boot_image_checksum),
    image_roots_(image_roots),
    pointer_size_(pointer_size) {
  CHECK_EQ(image_begin, RoundUp(image_begin, kElfSegmentAlignment));
  if (oat_checksum != 0u) {
    CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kElfSegmentAlignment));
    CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kElfSegmentAlignment));
    CHECK_LT(image_roots, oat_file_begin);
    CHECK_LE(oat_file_begin, oat_data_begin);
    CHECK_LT(oat_data_begin, oat_data_end);
    CHECK_LE(oat_data_end, oat_file_end);
  }
  static_assert(sizeof(PointerSize) == sizeof(uint32_t),
                "PointerSize class is expected to be a uint32_t for the header");
  memcpy(magic_, kImageMagic, sizeof(kImageMagic));
  memcpy(version_, kImageVersion, sizeof(kImageVersion));
  std::copy_n(sections, kSectionCount, sections_);
}

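// Illustrative example (addresses are made up): if an app image was compiled to be loaded
// at 0x70000000 but the kernel maps the file at 0x7f000000, the loader computes
// delta = 0x7f000000 - 0x70000000 and calls RelocateImageReferences(delta), after which
// image_begin_, image_roots_ and the oat_* fields all describe the mapped location.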
void ImageHeader::RelocateImageReferences(int64_t delta) {
  // App images can be relocated to any page-aligned address.
  // Unlike the boot image, whose memory is reserved ahead of loading and is aligned
  // to kElfSegmentAlignment, an app image may be mapped without a prior reservation,
  // i.e. via direct file mapping, in which case the kernel picks the address and the
  // only guarantee is page-size alignment.
  //
  // NOTE: While this can be less than the alignment requested in the ELF header, it
  //       is sufficient in practice: the only reason for the ELF segment alignment to
  //       exceed one page is compatibility with system configurations that use larger
  //       page sizes.
  //
  //       Adding a preliminary memory reservation would introduce some overhead.
  //
  //       However, the alignment requirement is technically not fulfilled, and that
  //       might be worth addressing even at some cost. Any fix would have to be made
  //       in coordination with the dynamic linker's ELF loader, as otherwise an
  //       inconsistency would still be possible, e.g. when `dlopen`-like calls are
  //       used to load OAT files.
  CHECK_ALIGNED_PARAM(delta, gPageSize) << "relocation delta must be page aligned";
  oat_file_begin_ += delta;
  oat_data_begin_ += delta;
  oat_data_end_ += delta;
  oat_file_end_ += delta;
  image_begin_ += delta;
  image_roots_ += delta;
}

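// Note: image_methods_ holds raw ArtMethod addresses (see GetImageMethod() below) that
// point into the boot image, so they are shifted by the same delta as boot_image_begin_.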
void ImageHeader::RelocateBootImageReferences(int64_t delta) {
  CHECK_ALIGNED(delta, kElfSegmentAlignment) << "relocation delta must be Elf segment aligned";
  DCHECK_EQ(boot_image_begin_ != 0u, boot_image_size_ != 0u);
  if (boot_image_begin_ != 0u) {
    boot_image_begin_ += delta;
  }
  for (size_t i = 0; i < kImageMethodsCount; ++i) {
    image_methods_[i] += delta;
  }
}

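// Worked example (illustrative numbers only): with image_size_ = 0x12345 and a
// kElfSegmentAlignment of 0x1000, an app image reserves RoundUp(0x12345, 0x1000) = 0x13000
// bytes, so image_reservation_size_ == 0x13000. Boot images additionally reserve address
// space for their oat files, so for them this equality does not hold.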
bool ImageHeader::IsAppImage() const {
  // Unlike the boot image and boot image extensions, which include address space for their
  // oat files in the reservation size, app images are loaded separately from oat files, so
  // their reservation size is simply the image size rounded up to the ELF segment alignment.
  return image_reservation_size_ == RoundUp(image_size_, kElfSegmentAlignment);
}

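// Example: for a boot image compiled with --single-image all components sit behind a single
// oat file, so the reservation [image_begin_, image_begin_ + image_reservation_size_) ends
// exactly at oat_file_end_ and there is just one image space; otherwise every component is
// counted as its own space.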
uint32_t ImageHeader::GetImageSpaceCount() const {
  DCHECK(!IsAppImage());
  DCHECK_NE(component_count_, 0u);  // Must be the header for the first component.
  // For images compiled with --single-image, there is only one oat file. To detect
  // that, check whether the reservation ends at the end of the first oat file.
  return (image_begin_ + image_reservation_size_ == oat_file_end_) ? 1u : component_count_;
}

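// Note: the begin/size check below deliberately relies on unsigned wraparound, e.g. (32-bit
// illustration) image_begin_ = 0xfffff000 with image_size_ = 0x2000 wraps to 0x1000, which
// fails image_begin_ < image_begin_ + image_size_; the same check also rejects a zero
// image_size_.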
bool ImageHeader::IsValid() const {
  if (memcmp(magic_, kImageMagic, sizeof(kImageMagic)) != 0) {
    return false;
  }
  if (memcmp(version_, kImageVersion, sizeof(kImageVersion)) != 0) {
    return false;
  }
  if (!IsAligned<kElfSegmentAlignment>(image_reservation_size_)) {
    return false;
  }
  // Unsigned so wraparound is well defined.
  if (image_begin_ >= image_begin_ + image_size_) {
    return false;
  }
  if (oat_checksum_ != 0u) {
    if (oat_file_begin_ > oat_file_end_) {
      return false;
    }
    if (oat_data_begin_ > oat_data_end_) {
      return false;
    }
    if (oat_file_begin_ >= oat_data_begin_) {
      return false;
    }
  }
  return true;
}

const char* ImageHeader::GetMagic() const {
  CHECK(IsValid());
  return reinterpret_cast<const char*>(magic_);
}

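// The image_methods_ entries are stored as plain integers in the header and only cast back
// to ArtMethod* here; they are meaningful only once the image they point into has been
// mapped and, if needed, relocated (see RelocateBootImageReferences() above).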
ArtMethod* ImageHeader::GetImageMethod(ImageMethod index) const {
  CHECK_LT(static_cast<size_t>(index), kImageMethodsCount);
  return reinterpret_cast<ArtMethod*>(image_methods_[index]);
}

std::ostream& operator<<(std::ostream& os, const ImageSection& section) {
  return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
}

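// Minimal caller sketch (illustration only; it assumes ObjectVisitor exposes the virtual
// Visit(mirror::Object*) used below and that `space_begin` is the base of the mapped image):
//
//   class CountingVisitor : public ObjectVisitor {
//    public:
//     void Visit(mirror::Object* obj) override { count_ += (obj != nullptr) ? 1u : 0u; }
//     size_t count_ = 0;
//   };
//   CountingVisitor counter;
//   header.VisitObjects(&counter, space_begin, header.GetPointerSize());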
void ImageHeader::VisitObjects(ObjectVisitor* visitor,
                               uint8_t* base,
                               PointerSize pointer_size) const {
  DCHECK_EQ(pointer_size, GetPointerSize());
  const ImageSection& objects = GetObjectsSection();
  static const size_t kStartPos = RoundUp(sizeof(ImageHeader), kObjectAlignment);
  for (size_t pos = kStartPos; pos < objects.Size(); ) {
    mirror::Object* object = reinterpret_cast<mirror::Object*>(base + objects.Offset() + pos);
    visitor->Visit(object);
    pos += RoundUp(object->SizeOf(), kObjectAlignment);
  }
}

PointerSize ImageHeader::GetPointerSize() const {
  return pointer_size_;
}

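// LZ4_decompress_safe() returns the number of bytes written on success and a negative value
// on malformed input; this wrapper folds that convention into a bool plus an unsigned size
// so that callers cannot accidentally treat an error code as a size.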
bool LZ4_decompress_safe_checked(const char* source,
                                 char* dest,
                                 int compressed_size,
                                 int max_decompressed_size,
                                 /*out*/ size_t* decompressed_size_checked,
                                 /*out*/ std::string* error_msg) {
  int decompressed_size = LZ4_decompress_safe(source, dest, compressed_size, max_decompressed_size);
  if (UNLIKELY(decompressed_size < 0)) {
    *error_msg = android::base::StringPrintf("LZ4_decompress_safe() returned negative size: %d",
                                             decompressed_size);
    return false;
  } else {
    *decompressed_size_checked = static_cast<size_t>(decompressed_size);
    return true;
  }
}

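// Usage sketch (illustration only, not the actual loader code): a compressed image is
// reconstructed block by block, e.g.
//
//   std::string error_msg;
//   for (const ImageHeader::Block& block : blocks) {  // Block descriptors read from the file.
//     if (!block.Decompress(/*out_ptr=*/ image_begin, /*in_ptr=*/ file_begin, &error_msg)) {
//       return false;
//     }
//   }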
bool ImageHeader::Block::Decompress(uint8_t* out_ptr,
                                    const uint8_t* in_ptr,
                                    std::string* error_msg) const {
  switch (storage_mode_) {
    case kStorageModeUncompressed: {
      CHECK_EQ(image_size_, data_size_);
      memcpy(out_ptr + image_offset_, in_ptr + data_offset_, data_size_);
      break;
    }
    case kStorageModeLZ4:
    case kStorageModeLZ4HC: {
      // LZ4HC and LZ4 share the same compressed format; both are decompressed with
      // LZ4_decompress_safe().
      size_t decompressed_size;
      bool ok = LZ4_decompress_safe_checked(
          reinterpret_cast<const char*>(in_ptr) + data_offset_,
          reinterpret_cast<char*>(out_ptr) + image_offset_,
          data_size_,
          image_size_,
          &decompressed_size,
          error_msg);
      if (!ok) {
        return false;
      }
      if (decompressed_size != image_size_) {
        if (error_msg != nullptr) {
          // Likely disk or memory corruption, just bail.
          *error_msg = (std::ostringstream() << "Decompressed size " << decompressed_size
                                             << " does not match image size " << image_size_).str();
        }
        return false;
      }
      break;
    }
    default: {
      if (error_msg != nullptr) {
        *error_msg = (std::ostringstream() << "Invalid image format " << storage_mode_).str();
      }
      return false;
    }
  }
  return true;
}

const char* ImageHeader::GetImageSectionName(ImageSections index) {
  switch (index) {
    case kSectionObjects: return "Objects";
    case kSectionArtFields: return "ArtFields";
    case kSectionArtMethods: return "ArtMethods";
    case kSectionImTables: return "ImTables";
    case kSectionIMTConflictTables: return "IMTConflictTables";
    case kSectionRuntimeMethods: return "RuntimeMethods";
    case kSectionJniStubMethods: return "JniStubMethods";
    case kSectionInternedStrings: return "InternedStrings";
    case kSectionClassTable: return "ClassTable";
    case kSectionStringReferenceOffsets: return "StringReferenceOffsets";
    case kSectionDexCacheArrays: return "DexCacheArrays";
    case kSectionMetadata: return "Metadata";
    case kSectionImageBitmap: return "ImageBitmap";
    case kSectionCount: return nullptr;
  }
}

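// LZ4 and LZ4HC emit the same compressed byte stream, so the decompression path above is
// shared; LZ4HC merely spends more compression-time CPU for a better ratio (hence
// LZ4HC_CLEVEL_MAX below).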
// Compress data from `source` into `storage`.
static bool CompressData(ArrayRef<const uint8_t> source,
                         ImageHeader::StorageMode image_storage_mode,
                         /*out*/ dchecked_vector<uint8_t>* storage) {
  const uint64_t compress_start_time = NanoTime();

  // The worst-case compressed size bound is the same for LZ4 and LZ4HC.
  storage->resize(LZ4_compressBound(source.size()));
  size_t data_size = 0;
  if (image_storage_mode == ImageHeader::kStorageModeLZ4) {
    data_size = LZ4_compress_default(
        reinterpret_cast<char*>(const_cast<uint8_t*>(source.data())),
        reinterpret_cast<char*>(storage->data()),
        source.size(),
        storage->size());
  } else {
    DCHECK_EQ(image_storage_mode, ImageHeader::kStorageModeLZ4HC);
    data_size = LZ4_compress_HC(
        reinterpret_cast<const char*>(const_cast<uint8_t*>(source.data())),
        reinterpret_cast<char*>(storage->data()),
        source.size(),
        storage->size(),
        LZ4HC_CLEVEL_MAX);
  }

  if (data_size == 0) {
    return false;
  }
  storage->resize(data_size);

  VLOG(image) << "Compressed from " << source.size() << " to " << storage->size() << " in "
              << PrettyDuration(NanoTime() - compress_start_time);
  if (kIsDebugBuild) {
    // Verify the round trip: decompress and compare against the original data.
    dchecked_vector<uint8_t> decompressed(source.size());
    size_t decompressed_size;
    std::string error_msg;
    bool ok = LZ4_decompress_safe_checked(
        reinterpret_cast<char*>(storage->data()),
        reinterpret_cast<char*>(decompressed.data()),
        storage->size(),
        decompressed.size(),
        &decompressed_size,
        &error_msg);
    if (!ok) {
      LOG(FATAL) << error_msg;
      UNREACHABLE();
    }
    CHECK_EQ(decompressed_size, decompressed.size());
    CHECK_EQ(memcmp(source.data(), decompressed.data(), source.size()), 0) << image_storage_mode;
  }
  return true;
}

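// Rough on-disk layout produced by WriteData() for a compressed image (sketch; everything
// is addressed by file offset):
//
//   [ImageHeader][compressed block 0][compressed block 1]...[Block descriptor table]
//   [image bitmap, aligned up to kElfSegmentAlignment]
//
// For uncompressed images each block is instead written at its original image offset so the
// file can be mapped directly.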
bool ImageHeader::WriteData(const ImageFileGuard& image_file,
                            const uint8_t* data,
                            const uint8_t* bitmap_data,
                            ImageHeader::StorageMode image_storage_mode,
                            uint32_t max_image_block_size,
                            bool update_checksum,
                            std::string* error_msg) {
  const bool is_compressed = image_storage_mode != ImageHeader::kStorageModeUncompressed;
  dchecked_vector<std::pair<uint32_t, uint32_t>> block_sources;
  dchecked_vector<ImageHeader::Block> blocks;

  // Add a set of solid blocks such that no block is larger than the maximum size. A solid block
  // is a block that must be decompressed all at once.
  auto add_blocks = [&](uint32_t offset, uint32_t size) {
    while (size != 0u) {
      const uint32_t cur_size = std::min(size, max_image_block_size);
      block_sources.emplace_back(offset, cur_size);
      offset += cur_size;
      size -= cur_size;
    }
  };

  add_blocks(sizeof(ImageHeader), this->GetImageSize() - sizeof(ImageHeader));

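  // zlib convention: adler32(0L, Z_NULL, 0) returns the initial seed value; the checksum is
  // then folded over the header, each block as written (possibly compressed), and finally
  // the bitmap near the end of this function.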
  // Checksum of compressed image data and header.
  uint32_t image_checksum = 0u;
  if (update_checksum) {
    image_checksum = adler32(0L, Z_NULL, 0);
    image_checksum = adler32(image_checksum,
                             reinterpret_cast<const uint8_t*>(this),
                             sizeof(ImageHeader));
  }

  // Copy and compress blocks.
  uint32_t out_offset = sizeof(ImageHeader);
  for (const std::pair<uint32_t, uint32_t> block : block_sources) {
    ArrayRef<const uint8_t> raw_image_data(data + block.first, block.second);
    dchecked_vector<uint8_t> compressed_data;
    ArrayRef<const uint8_t> image_data;
    if (is_compressed) {
      if (!CompressData(raw_image_data, image_storage_mode, &compressed_data)) {
        *error_msg = "Error compressing data for " +
            image_file->GetPath() + ": " + std::string(strerror(errno));
        return false;
      }
      image_data = ArrayRef<const uint8_t>(compressed_data);
    } else {
      image_data = raw_image_data;
      // For uncompressed, preserve alignment since the image will be directly mapped.
      out_offset = block.first;
    }

    // Fill in the compressed location of the block.
    blocks.emplace_back(ImageHeader::Block(
        image_storage_mode,
        /*data_offset=*/ out_offset,
        /*data_size=*/ image_data.size(),
        /*image_offset=*/ block.first,
        /*image_size=*/ block.second));

    if (!image_file->PwriteFully(image_data.data(), image_data.size(), out_offset)) {
      *error_msg = "Failed to write image file data " +
          image_file->GetPath() + ": " + std::string(strerror(errno));
      return false;
    }
    out_offset += image_data.size();
    if (update_checksum) {
      image_checksum = adler32(image_checksum, image_data.data(), image_data.size());
    }
  }

  if (is_compressed) {
    // Align up since the compressed data is not necessarily aligned.
    out_offset = RoundUp(out_offset, alignof(ImageHeader::Block));
    CHECK(!blocks.empty());
    const size_t blocks_bytes = blocks.size() * sizeof(blocks[0]);
    if (!image_file->PwriteFully(&blocks[0], blocks_bytes, out_offset)) {
      *error_msg = "Failed to write image blocks " +
          image_file->GetPath() + ": " + std::string(strerror(errno));
      return false;
    }
    this->blocks_offset_ = out_offset;
    this->blocks_count_ = blocks.size();
    out_offset += blocks_bytes;
  }

  // Data size includes everything except the bitmap.
  this->data_size_ = out_offset - sizeof(ImageHeader);

  // Update and write the bitmap section. Note that the bitmap section is relative to the
  // possibly compressed image.
  ImageSection& bitmap_section = GetImageSection(ImageHeader::kSectionImageBitmap);
  // Align up since data size may be unaligned if the image is compressed.
  out_offset = RoundUp(out_offset, kElfSegmentAlignment);
  bitmap_section = ImageSection(out_offset, bitmap_section.Size());

  if (!image_file->PwriteFully(bitmap_data,
                               bitmap_section.Size(),
                               bitmap_section.Offset())) {
    *error_msg = "Failed to write image file bitmap " +
        image_file->GetPath() + ": " + std::string(strerror(errno));
    return false;
  }

  int err = image_file->Flush();
  if (err < 0) {
    *error_msg = "Failed to flush image file " + image_file->GetPath() + ": " + std::to_string(err);
    return false;
  }

  if (update_checksum) {
    // Calculate the image checksum of the remaining data.
    image_checksum = adler32(image_checksum,
                             reinterpret_cast<const uint8_t*>(bitmap_data),
                             bitmap_section.Size());
    this->SetImageChecksum(image_checksum);
  }

  if (VLOG_IS_ON(image)) {
    const size_t separately_written_section_size = bitmap_section.Size();
    const size_t total_uncompressed_size = image_size_ + separately_written_section_size;
    const size_t total_compressed_size = out_offset + separately_written_section_size;

    VLOG(compiler) << "UncompressedImageSize = " << total_uncompressed_size;
    if (total_uncompressed_size != total_compressed_size) {
      VLOG(compiler) << "CompressedImageSize = " << total_compressed_size;
    }
  }

  DCHECK_EQ(bitmap_section.End(), static_cast<size_t>(image_file->GetLength()))
      << "Bitmap should be at the end of the file";
  return true;
}

}  // namespace art