//
// Copyright (C) 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_consumer/verity_writer_android.h"

#include <fcntl.h>

#include <algorithm>
#include <memory>
#include <utility>

#include <base/logging.h>
#include <base/posix/eintr_wrapper.h>
#include <fec/ecc.h>
extern "C" {
#include <fec.h>
}
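// Together these headers provide the Reed-Solomon codec
// (init_rs_char/encode_rs_char/free_rs_char) and the interleaving helper
// fec_ecc_interleave() used below.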

#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/cached_file_descriptor.h"
#include "update_engine/payload_consumer/file_descriptor.h"

namespace chromeos_update_engine {

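// IncrementalEncodeFEC spreads FEC generation over multiple calls so the
// caller can interleave other work and report progress: call Init() once,
// then call Compute() repeatedly until Finished() returns true.
// ReportProgress() returns the fraction of encode rounds completed.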
bool IncrementalEncodeFEC::Init(const uint64_t _data_offset,
                                const uint64_t _data_size,
                                const uint64_t _fec_offset,
                                const uint64_t _fec_size,
                                const uint64_t _fec_roots,
                                const uint64_t _block_size,
                                const bool _verify_mode) {
  current_step_ = EncodeFECStep::kInitFDStep;
  data_offset_ = _data_offset;
  data_size_ = _data_size;
  fec_offset_ = _fec_offset;
  fec_size_ = _fec_size;
  fec_roots_ = _fec_roots;
  block_size_ = _block_size;
  verify_mode_ = _verify_mode;
  current_round_ = 0;
  // This is the N in RS(M, N), which is the number of bytes for each rs block.
  rs_n_ = FEC_RSM - fec_roots_;
  rs_char_.reset(init_rs_char(FEC_PARAMS(fec_roots_)));
  rs_blocks_.resize(block_size_ * rs_n_);
  buffer_.resize(block_size_, 0);
  fec_.resize(block_size_ * fec_roots_);
  fec_read_.resize(fec_.size());
  TEST_AND_RETURN_FALSE(data_size_ % block_size_ == 0);
  TEST_AND_RETURN_FALSE(fec_roots_ >= 0 && fec_roots_ < FEC_RSM);

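  // Each round consumes rs_n_ data blocks and emits fec_roots_ parity bytes
  // per byte position, i.e. block_size_ * fec_roots_ bytes of FEC, so the
  // total FEC size must equal num_rounds_ * fec_roots_ * block_size_.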
  num_rounds_ = utils::DivRoundUp(data_size_ / block_size_, rs_n_);
  TEST_AND_RETURN_FALSE(num_rounds_ * fec_roots_ * block_size_ == fec_size_);
  TEST_AND_RETURN_FALSE(rs_char_ != nullptr);
  return true;
}

bool IncrementalEncodeFEC::Compute(FileDescriptor* _read_fd,
                                   FileDescriptor* _write_fd) {
  if (current_step_ == EncodeFECStep::kInitFDStep) {
    read_fd_ = _read_fd;
    write_fd_ = _write_fd;
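    // Route writes through |cache_fd_| so they are batched; see the VABC
    // note in VerityWriterAndroid::EncodeFEC() below.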
    cache_fd_.SetFD(write_fd_);
    write_fd_ = &cache_fd_;
  } else if (current_step_ == EncodeFECStep::kEncodeRoundStep) {
    // Encode |block_size| RS blocks each round so that we can read one block
    // at a time instead of one byte, which improves random read performance.
    // This uses about 1 MiB of memory for a 4K block size.
    for (size_t j = 0; j < rs_n_; j++) {
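      // fec_ecc_interleave() picks which data block supplies symbol j of
      // every RS codeword in this round; interleaving spreads a codeword's
      // symbols across different blocks so a single corrupted block damages
      // at most one symbol per codeword.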
      uint64_t offset = fec_ecc_interleave(
          current_round_ * rs_n_ * block_size_ + j, rs_n_, num_rounds_);
      // Don't read past |data_size|; treat the remaining bytes as 0.
      if (offset >= data_size_) {
        std::fill(buffer_.begin(), buffer_.end(), 0);
      } else {
        ssize_t bytes_read = 0;
        TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd_,
                                              buffer_.data(),
                                              buffer_.size(),
                                              data_offset_ + offset,
                                              &bytes_read));
        TEST_AND_RETURN_FALSE(bytes_read >= 0);
        TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) ==
                              buffer_.size());
      }
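      // Byte k of this block becomes symbol j of the k-th codeword: row k of
      // |rs_blocks_| collects one byte from each of the rs_n_ blocks.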
      for (size_t k = 0; k < buffer_.size(); k++) {
        rs_blocks_[k * rs_n_ + j] = buffer_[k];
      }
    }
    for (size_t j = 0; j < block_size_; j++) {
      // Encode [j * rs_n_ : (j + 1) * rs_n_) in |rs_blocks| and write
      // |fec_roots| number of parity bytes to |j * fec_roots| in |fec|.
      encode_rs_char(rs_char_.get(),
                     rs_blocks_.data() + j * rs_n_,
                     fec_.data() + j * fec_roots_);
    }

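    // In verify mode, compare the freshly computed parity against the FEC
    // already on disk instead of writing it out.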
    if (verify_mode_) {
      ssize_t bytes_read = 0;
      TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd_,
                                            fec_read_.data(),
                                            fec_read_.size(),
                                            fec_offset_,
                                            &bytes_read));
      TEST_AND_RETURN_FALSE(bytes_read >= 0);
      TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) ==
                            fec_read_.size());
      TEST_AND_RETURN_FALSE(fec_ == fec_read_);
    } else {
      CHECK(write_fd_);
      write_fd_->Seek(fec_offset_, SEEK_SET);
      if (!utils::WriteAll(write_fd_, fec_.data(), fec_.size())) {
        PLOG(ERROR) << "EncodeFEC write() failed";
        return false;
      }
    }
    fec_offset_ += fec_.size();
    current_round_++;
  } else if (current_step_ == EncodeFECStep::kWriteStep) {
    write_fd_->Flush();
  }
  UpdateState();
  return true;
}

// Updates the current state of EncodeFEC. Can be changed to have smaller steps.
void IncrementalEncodeFEC::UpdateState() {
  if (current_step_ == EncodeFECStep::kInitFDStep) {
    current_step_ = EncodeFECStep::kEncodeRoundStep;
  } else if (current_step_ == EncodeFECStep::kEncodeRoundStep &&
             current_round_ == num_rounds_) {
    current_step_ = EncodeFECStep::kWriteStep;
  } else if (current_step_ == EncodeFECStep::kWriteStep) {
    current_step_ = EncodeFECStep::kComplete;
  }
}

bool IncrementalEncodeFEC::Finished() const {
  return current_step_ == EncodeFECStep::kComplete;
}

double IncrementalEncodeFEC::ReportProgress() const {
  if (num_rounds_ == 0) {
    return 1.0;
  }
  return static_cast<double>(current_round_) / num_rounds_;
}

namespace verity_writer {
std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
  return std::make_unique<VerityWriterAndroid>();
}
}  // namespace verity_writer

bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
  partition_ = &partition;
  LOG(INFO) << "Initializing Incremental EncodeFEC";
  TEST_AND_RETURN_FALSE(encodeFEC_.Init(partition_->fec_data_offset,
                                        partition_->fec_data_size,
                                        partition_->fec_offset,
                                        partition_->fec_size,
                                        partition_->fec_roots,
                                        partition_->block_size,
                                        false /* verify_mode */));
  hash_tree_written_ = false;
  if (partition_->hash_tree_size != 0) {
    auto hash_function =
        HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
    if (hash_function == nullptr) {
      LOG(ERROR) << "Verity hash algorithm not supported: "
                 << partition_->hash_tree_algorithm;
      return false;
    }
    hash_tree_builder_ = std::make_unique<HashTreeBuilder>(
        partition_->block_size, hash_function);
    TEST_AND_RETURN_FALSE(hash_tree_builder_->Initialize(
        partition_->hash_tree_data_size, partition_->hash_tree_salt));
    if (hash_tree_builder_->CalculateSize(partition_->hash_tree_data_size) !=
        partition_->hash_tree_size) {
      LOG(ERROR) << "Verity hash tree size does not match, stored: "
                 << partition_->hash_tree_size << ", calculated: "
                 << hash_tree_builder_->CalculateSize(
                        partition_->hash_tree_data_size);
      return false;
    }
  }
  total_offset_ = 0;
  return true;
}

bool VerityWriterAndroid::Update(const uint64_t offset,
                                 const uint8_t* buffer,
                                 size_t size) {
  if (offset != total_offset_) {
    LOG(ERROR) << "Sequential read expected, expected to read at: "
               << total_offset_ << " actual read occurs at: " << offset;
    return false;
  }
  if (partition_->hash_tree_size != 0) {
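    // Only bytes inside [hash_tree_data_offset, hash_tree_data_end) feed the
    // hash tree, so clamp the incoming chunk to that window.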
    const uint64_t hash_tree_data_end =
        partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
    const uint64_t start_offset =
        std::max(offset, partition_->hash_tree_data_offset);
    if (offset + size > hash_tree_data_end) {
      LOG(WARNING)
          << "Reading past hash_tree_data_end, something is probably "
             "wrong, might cause incorrect hash of partitions. offset: "
          << offset << " size: " << size
          << " hash_tree_data_end: " << hash_tree_data_end;
    }
    const uint64_t end_offset = std::min(offset + size, hash_tree_data_end);
    if (start_offset < end_offset) {
      TEST_AND_RETURN_FALSE(hash_tree_builder_->Update(
          buffer + start_offset - offset, end_offset - start_offset));

      if (end_offset == hash_tree_data_end) {
        LOG(INFO)
            << "Read everything before hash tree. Ready to write hash tree.";
      }
    }
  }
  total_offset_ += size;

  return true;
}

bool VerityWriterAndroid::Finalize(FileDescriptor* read_fd,
                                   FileDescriptor* write_fd) {
  const auto hash_tree_data_end =
      partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
  if (total_offset_ < hash_tree_data_end) {
    LOG(ERROR) << "Read up to " << total_offset_
               << " when we are expecting to read everything "
                  "before "
               << hash_tree_data_end;
    return false;
  }
  // All hash tree data blocks have been hashed; write the hash tree to disk.
  LOG(INFO) << "Writing verity hash tree to "
            << partition_->readonly_target_path;
  if (hash_tree_builder_) {
    TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
    TEST_AND_RETURN_FALSE_ERRNO(
        write_fd->Seek(partition_->hash_tree_offset, SEEK_SET));
    auto success =
        hash_tree_builder_->WriteHashTree([write_fd](auto data, auto size) {
          return utils::WriteAll(write_fd, data, size);
        });
    // The hash tree builder already prints error messages.
    TEST_AND_RETURN_FALSE(success);
    hash_tree_builder_.reset();
  }
  if (partition_->fec_size != 0) {
    LOG(INFO) << "Writing verity FEC to " << partition_->readonly_target_path;
    TEST_AND_RETURN_FALSE(EncodeFEC(read_fd,
                                    write_fd,
                                    partition_->fec_data_offset,
                                    partition_->fec_data_size,
                                    partition_->fec_offset,
                                    partition_->fec_size,
                                    partition_->fec_roots,
                                    partition_->block_size,
                                    false /* verify_mode */));
  }
  return true;
}

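// Incremental variant of Finalize(): the first call builds and writes the
// hash tree, and every call advances FEC encoding by one
// IncrementalEncodeFEC::Compute() step until FECFinished() reports completion.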
bool VerityWriterAndroid::IncrementalFinalize(FileDescriptor* read_fd,
                                              FileDescriptor* write_fd) {
  if (!hash_tree_written_) {
    LOG(INFO) << "Completing prework in Finalize";
    const auto hash_tree_data_end =
        partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
    if (total_offset_ < hash_tree_data_end) {
      LOG(ERROR) << "Read up to " << total_offset_
                 << " when we are expecting to read everything "
                    "before "
                 << hash_tree_data_end;
      return false;
    }
    // All hash tree data blocks have been hashed; write the hash tree to
    // disk.
    LOG(INFO) << "Writing verity hash tree to "
              << partition_->readonly_target_path;
    if (hash_tree_builder_) {
      TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
      TEST_AND_RETURN_FALSE_ERRNO(
          write_fd->Seek(partition_->hash_tree_offset, SEEK_SET));
      auto success =
          hash_tree_builder_->WriteHashTree([write_fd](auto data, auto size) {
            return utils::WriteAll(write_fd, data, size);
          });
      // The hash tree builder already prints error messages.
      TEST_AND_RETURN_FALSE(success);
      hash_tree_builder_.reset();
    }
    hash_tree_written_ = true;
    if (partition_->fec_size != 0) {
      LOG(INFO) << "Writing verity FEC to " << partition_->readonly_target_path;
    }
  }
  if (partition_->fec_size != 0) {
    TEST_AND_RETURN_FALSE(encodeFEC_.Compute(read_fd, write_fd));
  }
  return true;
}

bool VerityWriterAndroid::FECFinished() const {
  return (encodeFEC_.Finished() || partition_->fec_size == 0) &&
         hash_tree_written_;
}

double VerityWriterAndroid::GetProgress() {
  return encodeFEC_.ReportProgress();
}

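// Non-incremental FEC pass, used by Finalize(): encodes and writes (or, in
// verify mode, checks) all FEC rounds for the partition in a single call.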
bool VerityWriterAndroid::EncodeFEC(FileDescriptor* read_fd,
                                    FileDescriptor* write_fd,
                                    uint64_t data_offset,
                                    uint64_t data_size,
                                    uint64_t fec_offset,
                                    uint64_t fec_size,
                                    uint32_t fec_roots,
                                    uint32_t block_size,
                                    bool verify_mode) {
  TEST_AND_RETURN_FALSE(data_size % block_size == 0);
  TEST_AND_RETURN_FALSE(fec_roots >= 0 && fec_roots < FEC_RSM);
  // This is the N in RS(M, N), which is the number of bytes for each rs
  // block.
  size_t rs_n = FEC_RSM - fec_roots;
  uint64_t rounds = utils::DivRoundUp(data_size / block_size, rs_n);
  TEST_AND_RETURN_FALSE(rounds * fec_roots * block_size == fec_size);

  std::unique_ptr<void, decltype(&free_rs_char)> rs_char(
      init_rs_char(FEC_PARAMS(fec_roots)), &free_rs_char);
  TEST_AND_RETURN_FALSE(rs_char != nullptr);
  // Cache at most 1 MiB of FEC data. In VABC we need to re-open the fd if a
  // read() follows a write(), so reducing the number of writes saves
  // unnecessary re-opens.
  UnownedCachedFileDescriptor cache_fd(write_fd, 1 * (1 << 20));
  write_fd = &cache_fd;

  for (size_t i = 0; i < rounds; i++) {
    // Encode |block_size| RS blocks each round so that we can read one block
    // at a time instead of one byte, which improves random read performance.
    // This uses about 1 MiB of memory for a 4K block size.
    brillo::Blob rs_blocks(block_size * rs_n);
    for (size_t j = 0; j < rs_n; j++) {
      brillo::Blob buffer(block_size, 0);
      uint64_t offset =
          fec_ecc_interleave(i * rs_n * block_size + j, rs_n, rounds);
      // Don't read past |data_size|; treat the remaining bytes as 0.
      if (offset < data_size) {
        ssize_t bytes_read = 0;
        TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd,
                                              buffer.data(),
                                              buffer.size(),
                                              data_offset + offset,
                                              &bytes_read));
        TEST_AND_RETURN_FALSE(bytes_read >= 0);
        TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) == buffer.size());
      }
      for (size_t k = 0; k < buffer.size(); k++) {
        rs_blocks[k * rs_n + j] = buffer[k];
      }
    }
    brillo::Blob fec(block_size * fec_roots);
    for (size_t j = 0; j < block_size; j++) {
      // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write
      // |fec_roots| number of parity bytes to |j * fec_roots| in |fec|.
      encode_rs_char(rs_char.get(),
                     rs_blocks.data() + j * rs_n,
                     fec.data() + j * fec_roots);
    }

    if (verify_mode) {
      brillo::Blob fec_read(fec.size());
      ssize_t bytes_read = 0;
      TEST_AND_RETURN_FALSE(utils::PReadAll(
          read_fd, fec_read.data(), fec_read.size(), fec_offset, &bytes_read));
      TEST_AND_RETURN_FALSE(bytes_read >= 0);
      TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) == fec_read.size());
      TEST_AND_RETURN_FALSE(fec == fec_read);
    } else {
      CHECK(write_fd);
      write_fd->Seek(fec_offset, SEEK_SET);
      if (!utils::WriteAll(write_fd, fec.data(), fec.size())) {
        PLOG(ERROR) << "EncodeFEC write() failed";
        return false;
      }
    }
    fec_offset += fec.size();
  }
  write_fd->Flush();
  return true;
}

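// Convenience overload that opens the image at |path| itself: read-only in
// verify mode, read-write when actually writing FEC.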
bool VerityWriterAndroid::EncodeFEC(const std::string& path,
                                    uint64_t data_offset,
                                    uint64_t data_size,
                                    uint64_t fec_offset,
                                    uint64_t fec_size,
                                    uint32_t fec_roots,
                                    uint32_t block_size,
                                    bool verify_mode) {
  EintrSafeFileDescriptor fd;
  TEST_AND_RETURN_FALSE(fd.Open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
  return EncodeFEC(&fd,
                   &fd,
                   data_offset,
                   data_size,
                   fec_offset,
                   fec_size,
                   fec_roots,
                   block_size,
                   verify_mode);
}

}  // namespace chromeos_update_engine