/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use libc::EIO;
use std::io;

use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError, SHA256_HASH_SIZE};
use crate::common::{divide_roundup, CHUNK_SIZE};
use crate::file::{ChunkBuffer, ReadByChunk};
use openssl::sha::{sha256, Sha256};

const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];

type HashBuffer = [u8; SHA256_HASH_SIZE];

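/// Returns the SHA-256 hash of `chunk` zero-padded to `pad_to` bytes, so hashing a partial
/// chunk gives the same result as hashing the full, zero-filled chunk. For example
/// (illustrative only):
///
///     let partial = [0xabu8; 10];
///     let mut full = [0u8; CHUNK_SIZE as usize];
///     full[..10].copy_from_slice(&partial);
///     assert_eq!(hash_with_padding(&partial, CHUNK_SIZE as usize), sha256(&full));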
fn hash_with_padding(chunk: &[u8], pad_to: usize) -> HashBuffer {
    let padding_size = pad_to - chunk.len();
    let mut ctx = Sha256::new();
    ctx.update(chunk);
    ctx.update(&ZEROS[..padding_size]);
    ctx.finish()
}

fn verity_check<T: ReadByChunk>(
    chunk: &[u8],
    chunk_index: u64,
    file_size: u64,
    merkle_tree: &T,
) -> Result<HashBuffer, FsverityError> {
    // The caller should not be able to produce a chunk in the first place if `file_size` is 0.
    // The current implementation expects to crash when a `ReadByChunk` implementation reads
    // beyond the file size, including for an empty file.
    assert_ne!(file_size, 0);

    let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize);

    // When the file is smaller than or equal to CHUNK_SIZE, the root of the Merkle tree is
    // defined as the hash of the file content, zero-padded to the chunk size.
    if file_size <= CHUNK_SIZE {
        return Ok(chunk_hash);
    }

    fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
        chunk_hash,
        |actual_hash, result| {
            let (merkle_chunk, hash_offset_in_chunk) = result?;
            let expected_hash =
                &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + SHA256_HASH_SIZE];
            if actual_hash != expected_hash {
                return Err(FsverityError::CannotVerify);
            }
            Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize))
        },
    )
}

/// Given a chunk index and the size of the file, returns an iterator that walks the Merkle tree
/// from the leaf to the root. Each iterator item carries the chunk/node buffer along with the
/// offset of the child node's hash within it. It is up to the caller to use the node and hash,
/// e.g. for the actual verification.
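///
/// As a worked example of the arithmetic below: for an 8 MiB file (2048 leaf chunks, 128 hashes
/// per 4K node) the tree has two levels, so the walk for `chunk_index` 300 yields the leaf-level
/// node first, at tree chunk 1 + 300 / 128 = 3 with hash offset (300 % 128) * 32 = 1408, and
/// then the root node at tree chunk 0 with hash offset (300 / 128) * 32 = 64.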
#[allow(clippy::needless_collect)]
fn fsverity_walk<T: ReadByChunk>(
    chunk_index: u64,
    file_size: u64,
    merkle_tree: &T,
) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
    let hashes_per_node = CHUNK_SIZE / SHA256_HASH_SIZE as u64;
    debug_assert_eq!(hashes_per_node, 128u64);
    let max_level = merkle_tree_height(file_size).expect("file should not be empty") as u32;
    let root_to_leaf_steps = (0..=max_level)
        .rev()
        .map(|x| {
            let leaves_per_hash = hashes_per_node.pow(x);
            let leaves_size_per_hash = CHUNK_SIZE * leaves_per_hash;
            let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
            let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
            let level_size = nodes_at_level * CHUNK_SIZE;
            let offset_in_level = (chunk_index / leaves_per_hash) * SHA256_HASH_SIZE as u64;
            (level_size, offset_in_level)
        })
        .scan(0, |level_offset, (level_size, offset_in_level)| {
            let this_level_offset = *level_offset;
            *level_offset += level_size;
            let global_hash_offset = this_level_offset + offset_in_level;
            Some(global_hash_offset)
        })
        .map(|global_hash_offset| {
            let chunk_index = global_hash_offset / CHUNK_SIZE;
            let hash_offset_in_chunk = (global_hash_offset % CHUNK_SIZE) as usize;
            (chunk_index, hash_offset_in_chunk)
        })
        .collect::<Vec<_>>(); // Needs to collect first to be able to reverse below.

    Ok(root_to_leaf_steps.into_iter().rev().map(move |(chunk_index, hash_offset_in_chunk)| {
        let mut merkle_chunk = [0u8; 4096];
        // read_chunk is supposed to return a full chunk, or an incomplete one at the end of the
        // file. In the incomplete case, the hash is calculated with 0-padding to the chunk size.
        // Therefore, we don't need to check the returned size here.
        let _ = merkle_tree.read_chunk(chunk_index, &mut merkle_chunk)?;
        Ok((merkle_chunk, hash_offset_in_chunk))
    }))
}

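/// A `ReadByChunk` wrapper that verifies every chunk read from `chunked_file` against the
/// fs-verity Merkle tree in `merkle_tree` before returning it to the caller.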
pub struct VerifiedFileReader<F: ReadByChunk, M: ReadByChunk> {
    pub file_size: u64,
    chunked_file: F,
    merkle_tree: M,
    root_hash: HashBuffer,
}

impl<F: ReadByChunk, M: ReadByChunk> VerifiedFileReader<F, M> {
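    /// Creates a reader, checking `expected_digest` against the fs-verity digest computed from
    /// the Merkle tree root (or, for a file no larger than one chunk, from the zero-padded file
    /// content itself).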
    pub fn new(
        chunked_file: F,
        file_size: u64,
        expected_digest: &[u8],
        merkle_tree: M,
    ) -> Result<VerifiedFileReader<F, M>, FsverityError> {
        let mut buf = [0u8; CHUNK_SIZE as usize];
        if file_size <= CHUNK_SIZE {
            let _size = chunked_file.read_chunk(0, &mut buf)?;
            // The rest of the buffer stays 0-padded.
        } else {
            let size = merkle_tree.read_chunk(0, &mut buf)?;
            if buf.len() != size {
                return Err(FsverityError::InsufficientData(size));
            }
        }
        let root_hash = sha256(&buf[..]);
        if expected_digest == build_fsverity_digest(&root_hash, file_size) {
            // Once verified, use the root_hash for verification going forward.
            Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
        } else {
            Err(FsverityError::InvalidDigest)
        }
    }
}

impl<F: ReadByChunk, M: ReadByChunk> ReadByChunk for VerifiedFileReader<F, M> {
    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
        let size = self.chunked_file.read_chunk(chunk_index, buf)?;
        let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
            .map_err(|_| io::Error::from_raw_os_error(EIO))?;
        if root_hash != self.root_hash {
            Err(io::Error::from_raw_os_error(EIO))
        } else {
            Ok(size)
        }
    }
}
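
// A minimal usage sketch, assuming `file` and `merkle_tree` are values implementing
// `ReadByChunk` and `digest` is a trusted fs-verity digest obtained out of band (names here
// are hypothetical):
//
//     let reader = VerifiedFileReader::new(file, file_size, &digest, merkle_tree)?;
//     let mut buf = [0u8; 4096];
//     let size = reader.read_chunk(0, &mut buf)?; // Fails with EIO if the chunk was tampered.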

#[cfg(test)]
mod tests {
    use super::*;
    use crate::file::ReadByChunk;
    use anyhow::Result;
    use authfs_fsverity_metadata::{parse_fsverity_metadata, FSVerityMetadata};
    use std::cmp::min;
    use std::fs::File;
    use std::os::unix::fs::FileExt;

    struct LocalFileReader {
        file: File,
        size: u64,
    }

    impl LocalFileReader {
        fn new(file: File) -> io::Result<LocalFileReader> {
            let size = file.metadata()?.len();
            Ok(LocalFileReader { file, size })
        }

        fn len(&self) -> u64 {
            self.size
        }
    }

    impl ReadByChunk for LocalFileReader {
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            let start = chunk_index * CHUNK_SIZE;
            if start >= self.size {
                return Ok(0);
            }
            let end = min(self.size, start + CHUNK_SIZE);
            let read_size = (end - start) as usize;
            debug_assert!(read_size <= buf.len());
            self.file.read_exact_at(&mut buf[..read_size], start)?;
            Ok(read_size)
        }
    }

    type LocalVerifiedFileReader = VerifiedFileReader<LocalFileReader, MerkleTreeReader>;

    pub struct MerkleTreeReader {
        metadata: Box<FSVerityMetadata>,
    }

    impl ReadByChunk for MerkleTreeReader {
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            self.metadata.read_merkle_tree(chunk_index * CHUNK_SIZE, buf)
        }
    }

    fn total_chunk_number(file_size: u64) -> u64 {
        (file_size + 4095) / 4096
    }

    // Returns a reader with fs-verity verification and the file size.
    fn new_reader_with_fsverity(
        content_path: &str,
        metadata_path: &str,
    ) -> Result<(LocalVerifiedFileReader, u64)> {
        let file_reader = LocalFileReader::new(File::open(content_path)?)?;
        let file_size = file_reader.len();
        let metadata = parse_fsverity_metadata(File::open(metadata_path)?)?;
        Ok((
            VerifiedFileReader::new(
                file_reader,
                file_size,
                &metadata.digest.clone(),
                MerkleTreeReader { metadata },
            )?,
            file_size,
        ))
    }

    #[test]
    fn fsverity_verify_full_read_4k() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4k", "testdata/input.4k.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_full_read_4k1() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4k1", "testdata/input.4k1.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_full_read_4m() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4m", "testdata/input.4m.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_bad_merkle_tree() -> Result<()> {
        let (file_reader, _) = new_reader_with_fsverity(
            "testdata/input.4m",
            "testdata/input.4m.fsv_meta.bad_merkle", // First leaf node is corrupted.
        )?;

        // A broken node at the lowest level (a 4K chunk that contains 128 sha256 hashes) causes
        // read failures for the underlying data chunks it covers, but not for chunks outside its
        // range.
        let mut buf = [0u8; 4096];
        let num_hashes = 4096 / 32;
        let last_index = num_hashes;
        for i in 0..last_index {
            assert!(file_reader.read_chunk(i, &mut buf).is_err());
        }
        assert!(file_reader.read_chunk(last_index, &mut buf).is_ok());
        Ok(())
    }
}