
Searched refs:num_blocks (Results 1 – 25 of 104) sorted by relevance


/system/update_engine/payload_generator/
extent_ranges.cc
39 return a.start_block() + a.num_blocks() >= b.start_block(); in ExtentsOverlapOrTouch()
41 return b.start_block() + b.num_blocks() >= a.start_block(); in ExtentsOverlapOrTouch()
47 return a.num_blocks() != 0; in ExtentsOverlap()
51 return a.start_block() + a.num_blocks() > b.start_block(); in ExtentsOverlap()
53 return b.start_block() + b.num_blocks() > a.start_block(); in ExtentsOverlap()
72 uint64_t end = std::max(first.start_block() + first.num_blocks(), in UnionOverlappingExtents()
73 second.start_block() + second.num_blocks()); in UnionOverlappingExtents()
80 if (extent.start_block() == kSparseHole || extent.num_blocks() == 0) in AddExtent()
94 del_blocks += it->num_blocks(); in AddExtent()
104 blocks_ += extent.num_blocks(); in AddExtent()
[all …]
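
The extent_ranges.cc hits above all perform the same interval arithmetic: an extent covers blocks [start_block, start_block + num_blocks). A minimal sketch of those overlap/touch/union helpers, using a hypothetical plain Extent struct rather than the update_engine protobuf type:

    #include <algorithm>
    #include <cstdint>

    // Hypothetical stand-in for the protobuf Extent: covers [start_block, start_block + num_blocks).
    struct Extent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    // Extents overlap or touch if neither one ends strictly before the other starts.
    bool ExtentsOverlapOrTouch(const Extent& a, const Extent& b) {
      return a.start_block + a.num_blocks >= b.start_block &&
             b.start_block + b.num_blocks >= a.start_block;
    }

    // Strict overlap additionally requires both extents to be non-empty.
    bool ExtentsOverlap(const Extent& a, const Extent& b) {
      return a.num_blocks != 0 && b.num_blocks != 0 &&
             a.start_block + a.num_blocks > b.start_block &&
             b.start_block + b.num_blocks > a.start_block;
    }

    // The union of two overlapping/touching extents runs from the smaller start to the larger end.
    Extent UnionOverlappingExtents(const Extent& first, const Extent& second) {
      const uint64_t start = std::min(first.start_block, second.start_block);
      const uint64_t end = std::max(first.start_block + first.num_blocks,
                                    second.start_block + second.num_blocks);
      return {start, end - start};
    }
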
merge_sequence_generator.cc
93 CHECK_EQ(src_extent.num_blocks(), dst_extent.num_blocks()); in SplitSelfOverlapping()
102 for (size_t i = 0; i < src_extent.num_blocks(); i += diff) { in SplitSelfOverlapping()
103 auto num_blocks = std::min<size_t>(diff, src_extent.num_blocks() - i); in SplitSelfOverlapping() local
105 ExtentForRange(i + src_extent.start_block(), num_blocks), in SplitSelfOverlapping()
106 ExtentForRange(i + dst_extent.start_block(), num_blocks), in SplitSelfOverlapping()
125 if (op.src_extent().num_blocks() == op.dst_extent().num_blocks()) { in ProcessXorOps()
127 op.src_extent().num_blocks() + 1); in ProcessXorOps()
129 CHECK_EQ(op.src_extent().num_blocks(), in ProcessXorOps()
130 op.dst_extent().num_blocks() + 1); in ProcessXorOps()
157 src_extent.num_blocks()); in ProcessCopyOps()
[all …]
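
SplitSelfOverlapping above chops an equal-length source/destination pair into chunks no larger than the distance between the two start blocks, so no chunk reads blocks that an earlier chunk has already overwritten. A hedged sketch of that chunking loop (same hypothetical Extent struct, not the real protobuf type):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Extent {
      uint64_t start_block;  // extent covers [start_block, start_block + num_blocks)
      uint64_t num_blocks;
    };

    // Split a src/dst pair of equal length into pieces of at most |diff| blocks,
    // where diff is the distance between the two start blocks, so each piece can
    // be copied before its own source range gets overwritten.
    std::vector<std::pair<Extent, Extent>> SplitSelfOverlapping(const Extent& src,
                                                                const Extent& dst) {
      assert(src.num_blocks == dst.num_blocks);
      assert(src.start_block != dst.start_block);  // identical ranges need no copy at all
      const uint64_t diff = src.start_block > dst.start_block
                                ? src.start_block - dst.start_block
                                : dst.start_block - src.start_block;
      std::vector<std::pair<Extent, Extent>> result;
      for (uint64_t i = 0; i < src.num_blocks; i += diff) {
        const uint64_t num_blocks = std::min(diff, src.num_blocks - i);
        result.push_back({Extent{src.start_block + i, num_blocks},
                          Extent{dst.start_block + i, num_blocks}});
      }
      return result;
    }
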
extent_utils.cc
42 : extent.start_block() + extent.num_blocks(); in AppendBlockToExtents()
44 extent.set_num_blocks(extent.num_blocks() + 1); in AppendBlockToExtents()
94 static_cast<uint64_t>(e.num_blocks())); in ExtentsToStringTemplate()
115 if (last_ext.start_block() + last_ext.num_blocks() == in NormalizeExtents()
118 last_ext.set_num_blocks(last_ext.num_blocks() + curr_ext.num_blocks()); in NormalizeExtents()
139 if (scanned_blocks + extent.num_blocks() > block_offset) { in ExtentsSublist()
143 uint64_t new_num_blocks = extent.num_blocks(); in ExtentsSublist()
155 scanned_blocks += extent.num_blocks(); in ExtentsSublist()
163 return a.start_block() == b.start_block() && a.num_blocks() == b.num_blocks(); in operator ==()
171 out << "(" << extent.start_block() << " - " << extent.num_blocks() << ")"; in operator <<()
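
The extent_utils.cc hits show the two routine list-maintenance operations: growing the last extent when the next block is contiguous (AppendBlockToExtents) and coalescing adjacent extents (NormalizeExtents). A minimal sketch of both, again with a plain struct in place of the protobuf type:

    #include <cstdint>
    #include <vector>

    struct Extent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    // Append one block: extend the last extent if the block is contiguous with its
    // end, otherwise start a new single-block extent.
    void AppendBlockToExtents(std::vector<Extent>* extents, uint64_t block) {
      if (!extents->empty() &&
          extents->back().start_block + extents->back().num_blocks == block) {
        extents->back().num_blocks += 1;
        return;
      }
      extents->push_back({block, 1});
    }

    // Coalesce runs of extents that touch end-to-start into single extents.
    std::vector<Extent> NormalizeExtents(const std::vector<Extent>& extents) {
      std::vector<Extent> out;
      for (const Extent& curr : extents) {
        if (!out.empty() &&
            out.back().start_block + out.back().num_blocks == curr.start_block) {
          out.back().num_blocks += curr.num_blocks;
        } else {
          out.push_back(curr);
        }
      }
      return out;
    }
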
extent_utils.h
36 return x.num_blocks() < y.num_blocks(); in operator()
54 ret.resize(ret.size() + extent.num_blocks(), kSparseHole); in ExpandExtents()
57 block < (extent.start_block() + extent.num_blocks()); in ExpandExtents()
122 if (block_offset_ >= extents_[cur_extent_].num_blocks()) {
157 if (n - cur_block_count < extent.num_blocks()) { in GetNthBlock()
160 cur_block_count += extent.num_blocks(); in GetNthBlock()
167 block < extent.start_block() + extent.num_blocks(); in ExtentContains()
173 small.start_block() + small.num_blocks() <= in ExtentContains()
174 big.start_block() + big.num_blocks(); in ExtentContains()
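
extent_utils.h keeps the containment checks header-only: a block is inside an extent if it falls in [start, start + num_blocks), and one extent contains another if it covers both of its ends. A sketch of those two predicates:

    #include <cstdint>

    struct Extent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    // True if |block| lies inside |extent|.
    bool ExtentContains(const Extent& extent, uint64_t block) {
      return block >= extent.start_block &&
             block < extent.start_block + extent.num_blocks;
    }

    // True if |small| lies entirely inside |big|.
    bool ExtentContains(const Extent& big, const Extent& small) {
      return small.start_block >= big.start_block &&
             small.start_block + small.num_blocks <=
                 big.start_block + big.num_blocks;
    }
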
deflate_utils.cc
98 size_t num_blocks = 0; in RealignSplittedFiles() local
106 num_blocks += utils::BlocksInExtents(in_file.extents); in RealignSplittedFiles()
110 TEST_AND_RETURN_FALSE(utils::BlocksInExtents(file.extents) == num_blocks); in RealignSplittedFiles()
117 ((extent.start_block() + extent.num_blocks()) * kBlockSize); in IsBitExtentInExtent()
161 last_end_block = base_ext.start_block() + base_ext.num_blocks(); in ShiftExtentsOverExtents()
165 base_ext.start_block() + base_ext.num_blocks()) { in ShiftExtentsOverExtents()
166 if (over_ext->start_block() + over_ext->num_blocks() <= in ShiftExtentsOverExtents()
167 base_ext.start_block() + base_ext.num_blocks()) { in ShiftExtentsOverExtents()
172 auto new_blocks = base_ext.start_block() + base_ext.num_blocks() - in ShiftExtentsOverExtents()
177 over_ext->num_blocks() - new_blocks)}; in ShiftExtentsOverExtents()
[all …]
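
RealignSplittedFiles above only needs the total block count of each extent list, and IsBitExtentInExtent converts a block range into a byte range by multiplying by the block size. A small sketch of both conversions, assuming 4 KiB blocks:

    #include <cstdint>
    #include <vector>

    struct Extent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    constexpr uint64_t kBlockSize = 4096;  // assumed; update_engine payloads use 4 KiB blocks

    // Total number of blocks covered by an extent list.
    uint64_t BlocksInExtents(const std::vector<Extent>& extents) {
      uint64_t total = 0;
      for (const Extent& e : extents) total += e.num_blocks;
      return total;
    }

    // Byte range [*begin, *end) covered by a single extent.
    void ExtentByteRange(const Extent& e, uint64_t* begin, uint64_t* end) {
      *begin = e.start_block * kBlockSize;
      *end = (e.start_block + e.num_blocks) * kBlockSize;
    }
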
merge_sequence_generator_unittest.cc
224 src_block_count += merge_op.src_extent().num_blocks(); in ValidateSplitSequence()
225 dst_block_count += merge_op.dst_extent().num_blocks(); in ValidateSplitSequence()
237 ASSERT_EQ(src_block_count, src_extent.num_blocks()); in ValidateSplitSequence()
238 ASSERT_EQ(dst_block_count, dst_extent.num_blocks()); in ValidateSplitSequence()
291 ASSERT_EQ(sequence[0].src_extent().num_blocks(), 6UL); in TEST_F()
292 ASSERT_EQ(sequence[0].dst_extent().num_blocks(), 5UL); in TEST_F()
330 ASSERT_EQ(sequence[0].src_extent().num_blocks(), 6UL); in TEST_F()
331 ASSERT_EQ(sequence[0].dst_extent().num_blocks(), 5UL); in TEST_F()
337 ASSERT_EQ(sequence[1].src_extent().num_blocks(), 6UL); in TEST_F()
338 ASSERT_EQ(sequence[1].dst_extent().num_blocks(), 5UL); in TEST_F()
[all …]
boot_img_filesystem_unittest.cc
84 EXPECT_EQ(1u, files[0].extents[0].num_blocks()); in TEST_F()
90 EXPECT_EQ(2u, files[1].extents[0].num_blocks()); in TEST_F()
109 EXPECT_EQ(1u, files[0].extents[0].num_blocks()); in TEST_F()
115 EXPECT_EQ(2u, files[1].extents[0].num_blocks()); in TEST_F()
147 EXPECT_EQ(2u, files[0].extents[0].num_blocks()); in TEST_F()
153 EXPECT_EQ(1u, files[1].extents[0].num_blocks()); in TEST_F()
ab_generator.cc
121 uint64_t blocks_left = dst_ext.num_blocks(); in SplitSourceCopy()
123 if (curr_src_ext.num_blocks() <= blocks_left) { in SplitSourceCopy()
125 blocks_left -= curr_src_ext.num_blocks(); in SplitSourceCopy()
139 curr_src_ext.set_num_blocks(curr_src_ext.num_blocks() - blocks_left); in SplitSourceCopy()
141 blocks_left -= first_ext.num_blocks(); in SplitSourceCopy()
175 uint64_t data_size = dst_ext.num_blocks() * kBlockSize; in SplitAReplaceOp()
217 last_aop.op.dst_extents(last_dst_idx).num_blocks(); in MergeOperations()
220 last_aop.op.dst_extents(last_dst_idx).num_blocks() + in MergeOperations()
221 curr_aop.op.dst_extents(0).num_blocks(); in MergeOperations()
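
SplitSourceCopy walks the source extent list with a blocks_left counter: whole source extents are consumed while they fit in the current destination extent, and the extent that straddles the boundary is split. A hedged sketch of that accounting loop with the same hypothetical Extent struct:

    #include <cstdint>
    #include <vector>

    struct Extent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    // Carve dst_ext.num_blocks worth of blocks off the front of |src_extents|,
    // splitting the extent that straddles the boundary. Consumed extents are
    // removed from |src_extents|; the carved-off pieces are returned.
    std::vector<Extent> TakeSourceBlocks(std::vector<Extent>* src_extents,
                                         const Extent& dst_ext) {
      std::vector<Extent> taken;
      uint64_t blocks_left = dst_ext.num_blocks;
      while (blocks_left > 0 && !src_extents->empty()) {
        Extent& curr = src_extents->front();
        if (curr.num_blocks <= blocks_left) {
          // The whole extent fits: take it and move on.
          blocks_left -= curr.num_blocks;
          taken.push_back(curr);
          src_extents->erase(src_extents->begin());
        } else {
          // Split: take the first |blocks_left| blocks, keep the remainder.
          taken.push_back({curr.start_block, blocks_left});
          curr.start_block += blocks_left;
          curr.num_blocks -= blocks_left;
          blocks_left = 0;
        }
      }
      return taken;
    }
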
mapfile_filesystem.cc
56 off_t num_blocks = file_size / kMapfileBlockSize; in CreateFromFile() local
63 return base::WrapUnique(new MapfileFilesystem(mapfile_filename, num_blocks)); in CreateFromFile()
67 off_t num_blocks) in MapfileFilesystem() argument
68 : mapfile_filename_(mapfile_filename), num_blocks_(num_blocks) {} in MapfileFilesystem()
/system/update_engine/payload_consumer/
xor_extent_writer.cc
32 xor_block_data.resize(BlockSize() * xor_ext.num_blocks()); in WriteXorCowOp()
68 (src_block + xor_ext.num_blocks()) * BlockSize() + merge_op->src_offset(); in WriteXorExtent()
86 ExtentForRange(xor_ext.start_block(), xor_ext.num_blocks() - 1); in WriteXorExtent()
87 if (non_oob_extent.num_blocks() > 0) { in WriteXorExtent()
90 BlockSize() * non_oob_extent.num_blocks(), in WriteXorExtent()
95 ExtentForRange(xor_ext.start_block() + xor_ext.num_blocks() - 1, 1); in WriteXorExtent()
97 WriteXorCowOp(bytes + (xor_ext.num_blocks() - 1) * BlockSize(), in WriteXorExtent()
100 (src_block + xor_ext.num_blocks() - 1) * BlockSize())); in WriteXorExtent()
147 xor_ext.num_blocks() * BlockSize(), in WriteExtent()
165 if (ext.start_block() + ext.num_blocks() > in WriteReplaceExtents()
[all …]
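
WriteXorExtent peels the final block off each XOR extent and emits it separately, because the shifted source read for that block can run past the extent's end when src_offset is non-zero. A hedged sketch of just that split (the real writer then feeds both pieces to the COW writer):

    #include <cstdint>
    #include <utility>

    struct Extent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    // Split a non-empty XOR extent into an in-bounds prefix (possibly empty) and
    // its final block, which needs out-of-bounds handling for the shifted read.
    std::pair<Extent, Extent> SplitXorExtentForLastBlock(const Extent& xor_ext) {
      const Extent prefix{xor_ext.start_block, xor_ext.num_blocks - 1};
      const Extent last{xor_ext.start_block + xor_ext.num_blocks - 1, 1};
      return {prefix, last};
    }
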
extent_map_unittest.cc
77 ASSERT_EQ(ret[0].num_blocks(), 3U); in TEST_F()
80 ASSERT_EQ(ret[1].num_blocks(), 2U); in TEST_F()
85 ASSERT_EQ(ret[0].num_blocks(), 3U); in TEST_F()
88 ASSERT_EQ(ret[1].num_blocks(), 5U); in TEST_F()
100 ASSERT_EQ(ret[0].num_blocks(), 5U); in TEST_F()
103 ASSERT_EQ(ret[1].num_blocks(), 5U); in TEST_F()
106 ASSERT_EQ(ret[2].num_blocks(), 5U); in TEST_F()
119 ASSERT_EQ(ret[0].num_blocks(), 5U); in TEST_F()
124 ASSERT_EQ(ret[0].num_blocks(), 3U); in TEST_F()
127 ASSERT_EQ(ret[1].num_blocks(), 5U); in TEST_F()
[all …]
snapshot_extent_writer_unittest.cc
42 size_t num_blocks; member
50 uint64_t num_blocks) override { in AddCopy() argument
51 for (size_t i = 0; i < num_blocks; i++) { in AddCopy()
66 bool AddZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) override { in AddZeroBlocks() argument
133 size_t num_blocks) { in AddExtent() argument
136 extent->set_num_blocks(num_blocks); in AddExtent()
extent_reader.cc
44 total_size_ += extent.num_blocks() * block_size_; in Init()
75 cur_extent_->num_blocks() * block_size_ - cur_extent_bytes_read_; in Read()
92 if (cur_extent_bytes_read_ == cur_extent_->num_blocks() * block_size_) { in Read()
/system/core/fs_mgr/libsnapshot/libsnapshot_cow/
writer_base.cpp
91 bool CowWriterBase::AddCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks) { in AddCopy() argument
92 CHECK(num_blocks != 0); in AddCopy()
94 for (size_t i = 0; i < num_blocks; i++) { in AddCopy()
100 return EmitCopy(new_block, old_block, num_blocks); in AddCopy()
110 uint64_t num_blocks = size / options_.block_size; in AddRawBlocks() local
111 uint64_t last_block = new_block_start + num_blocks - 1; in AddRawBlocks()
126 uint64_t num_blocks = size / options_.block_size; in AddXorBlocks() local
127 uint64_t last_block = new_block_start + num_blocks - 1; in AddXorBlocks()
138 bool CowWriterBase::AddZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) { in AddZeroBlocks() argument
139 uint64_t last_block = new_block_start + num_blocks - 1; in AddZeroBlocks()
[all …]
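
CowWriterBase's Add* entry points convert byte sizes into block counts and validate the request before delegating to the Emit* hooks. A sketch of that front-end check, assuming only a fixed block size; the real writer also enforces merge-op ordering:

    #include <cstdint>
    #include <iostream>

    // Validate a raw-data write for a COW-style writer: the byte size must be a
    // whole number of blocks and the resulting block range must not wrap around.
    bool ValidateRawBlocks(uint64_t new_block_start, uint64_t size,
                           uint64_t block_size) {
      if (block_size == 0 || size % block_size != 0) {
        std::cerr << "size " << size << " is not a multiple of the block size\n";
        return false;
      }
      const uint64_t num_blocks = size / block_size;
      if (num_blocks == 0) return false;
      const uint64_t last_block = new_block_start + num_blocks - 1;
      return last_block >= new_block_start;  // reject overflow of the block range
    }
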
writer_v3.cpp
312 bool CowWriterV3::EmitCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks) { in EmitCopy() argument
313 if (!CheckOpCount(num_blocks)) { in EmitCopy()
316 for (size_t i = 0; i < num_blocks; i++) { in EmitCopy()
399 size_t num_blocks = (size / header_.block_size); in EmitBlocks() local
401 while (total_written < num_blocks) { in EmitBlocks()
402 size_t chunk = std::min(num_blocks - total_written, batch_size_); in EmitBlocks()
421 bool CowWriterV3::EmitZeroBlocks(uint64_t new_block_start, const uint64_t num_blocks) { in EmitZeroBlocks() argument
422 if (!CheckOpCount(num_blocks)) { in EmitZeroBlocks()
425 for (uint64_t i = 0; i < num_blocks; i++) { in EmitZeroBlocks()
536 size_t num_blocks = compression_factor / header_.block_size; in GetCompressionFactor() local
[all …]
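
EmitBlocks in the v3 writer streams the payload in batches: it derives num_blocks from the byte size, then takes min(remaining, batch_size) blocks per iteration. A hedged sketch of that loop shape with a hypothetical per-chunk callback standing in for the compression and I/O path:

    #include <algorithm>
    #include <cstdint>
    #include <functional>

    // Write |size| bytes of block data in chunks of at most |batch_size| blocks.
    // |write_chunk(offset_blocks, chunk_blocks)| is a placeholder for the per-batch
    // compression + write step of the real writer.
    bool EmitBlocksBatched(uint64_t size, uint64_t block_size, uint64_t batch_size,
                           const std::function<bool(uint64_t, uint64_t)>& write_chunk) {
      if (block_size == 0 || batch_size == 0 || size % block_size != 0) return false;
      const uint64_t num_blocks = size / block_size;
      uint64_t total_written = 0;
      while (total_written < num_blocks) {
        const uint64_t chunk = std::min(num_blocks - total_written, batch_size);
        if (!write_chunk(total_written, chunk)) return false;
        total_written += chunk;
      }
      return true;
    }
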
writer_v3.h
45 virtual bool EmitCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks = 1) override;
49 virtual bool EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) override;
72 std::vector<CompressedBuffer> ProcessBlocksWithNoCompression(const size_t num_blocks,
75 std::vector<CompressedBuffer> ProcessBlocksWithCompression(const size_t num_blocks,
78 std::vector<CompressedBuffer> ProcessBlocksWithThreadedCompression(const size_t num_blocks,
81 std::vector<CompressedBuffer> CompressBlocks(const size_t num_blocks, const void* data,
writer_base.h
38 bool AddCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks = 1) override;
42 bool AddZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) override;
54 virtual bool EmitCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks = 1) = 0;
58 virtual bool EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) = 0;
cow_compress.cpp
213 bool CompressWorker::CompressBlocks(const void* buffer, size_t num_blocks, size_t block_size, in CompressBlocks() argument
215 return CompressBlocks(compressor_.get(), block_size, buffer, num_blocks, compressed_data); in CompressBlocks()
219 size_t num_blocks, in CompressBlocks() argument
222 while (num_blocks) { in CompressBlocks()
234 num_blocks -= 1; in CompressBlocks()
259 bool ret = CompressBlocks(blocks.buffer, blocks.num_blocks, blocks.block_size, in RunThread()
280 size_t num_blocks) { in EnqueueCompressBlocks() argument
287 blocks.num_blocks = num_blocks; in EnqueueCompressBlocks()
writer_v2.cpp
310 bool CowWriterV2::EmitCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks) { in EmitCopy() argument
313 for (size_t i = 0; i < num_blocks; i++) { in EmitCopy()
335 bool CowWriterV2::CompressBlocks(size_t num_blocks, const void* data) { in CompressBlocks() argument
336 size_t num_threads = (num_blocks == 1) ? 1 : num_compress_threads_; in CompressBlocks()
337 size_t num_blocks_per_thread = num_blocks / num_threads; in CompressBlocks()
345 num_blocks, &compressed_buf_); in CompressBlocks()
354 num_blocks_per_thread = num_blocks; in CompressBlocks()
358 num_blocks -= num_blocks_per_thread; in CompressBlocks()
383 size_t num_blocks = (size / header_.block_size); in EmitBlocks() local
386 while (num_blocks) { in EmitBlocks()
[all …]
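
CompressBlocks in the v2 writer fans the block run out across its compression threads: one block is not worth fanning out, each thread gets num_blocks / num_threads blocks, and the last one absorbs the remainder. A sketch of that partitioning:

    #include <cstdint>
    #include <vector>

    struct BlockRange {
      uint64_t first_block;
      uint64_t num_blocks;
    };

    // Split |num_blocks| across up to |num_threads| workers; the final worker
    // takes whatever is left after the equal shares are handed out.
    std::vector<BlockRange> PartitionForCompression(uint64_t num_blocks,
                                                    uint64_t num_threads) {
      std::vector<BlockRange> ranges;
      if (num_blocks == 0) return ranges;
      if (num_blocks == 1 || num_threads <= 1) {
        ranges.push_back({0, num_blocks});
        return ranges;
      }
      const uint64_t per_thread = num_blocks / num_threads;
      uint64_t next = 0, remaining = num_blocks;
      for (uint64_t t = 0; t < num_threads && remaining > 0; t++) {
        uint64_t take = (t + 1 == num_threads) ? remaining : per_thread;
        if (take == 0) take = remaining;  // fewer blocks than threads: one worker takes the rest
        ranges.push_back({next, take});
        next += take;
        remaining -= take;
      }
      return ranges;
    }
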
/system/core/fs_mgr/libsnapshot/include/libsnapshot/
cow_writer.h
79 virtual bool AddCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks = 1) = 0;
89 virtual bool AddZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) = 0;
124 void EnqueueCompressBlocks(const void* buffer, size_t block_size, size_t num_blocks);
130 size_t num_blocks,
136 size_t num_blocks; member
152 bool CompressBlocks(const void* buffer, size_t num_blocks, size_t block_size,
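
cow_writer.h is the interface the writer results above implement: callers describe the snapshot as copy, raw, zero, and XOR runs, all expressed in block counts. A hedged usage sketch against a simplified interface of the same shape (not the exact libsnapshot API):

    #include <cstdint>
    #include <vector>

    // Simplified stand-in for the ICowWriter-style interface seen above.
    class BlockWriter {
     public:
      virtual ~BlockWriter() = default;
      virtual bool AddCopy(uint64_t new_block, uint64_t old_block,
                           uint64_t num_blocks = 1) = 0;
      virtual bool AddRawBlocks(uint64_t new_block_start, const void* data,
                                uint64_t size) = 0;
      virtual bool AddZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) = 0;
    };

    // Typical caller pattern: copies and zero runs are described purely by block
    // counts; only raw data carries a byte buffer, whose size must be block-aligned.
    bool WriteExampleSnapshot(BlockWriter* writer, uint64_t block_size) {
      std::vector<uint8_t> data(2 * block_size, 0xAB);  // two raw blocks of data
      return writer->AddCopy(/*new_block=*/0, /*old_block=*/100, /*num_blocks=*/8) &&
             writer->AddRawBlocks(/*new_block_start=*/8, data.data(), data.size()) &&
             writer->AddZeroBlocks(/*new_block_start=*/10, /*num_blocks=*/4);
    }
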
/system/update_engine/scripts/
payload_info.py
145 num_blocks = sum([ext.num_blocks for ext in extents])
147 '(%s,%s)' % (ext.start_block, ext.num_blocks) for ext in extents)
151 block_plural = 's' if num_blocks > 1 else ''
153 (name, len(extents), extent_plural, num_blocks, block_plural))
182 read_blocks += sum([ext.num_blocks for ext in curr_op.src_extents])
183 written_blocks += sum([ext.num_blocks for ext in curr_op.dst_extents])
187 last_ext.start_block + last_ext.num_blocks):
/system/core/fs_mgr/libsnapshot/
partition_cow_creator.cpp
97 if (s_it->num_blocks() <= s_offset || d_it->num_blocks() <= d_offset) { in OptimizeSourceCopyOperation()
104 auto s_step = s_it->num_blocks() - s_offset; in OptimizeSourceCopyOperation()
105 auto d_step = d_it->num_blocks() - d_offset; in OptimizeSourceCopyOperation()
122 while (s_it != src_extents.end() && s_offset >= s_it->num_blocks()) { in OptimizeSourceCopyOperation()
126 while (d_it != dst_extents.end() && d_offset >= d_it->num_blocks()) { in OptimizeSourceCopyOperation()
136 const auto block_boundary = de.start_block() + de.num_blocks(); in WriteExtent()
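
OptimizeSourceCopyOperation walks the source and destination extent lists in lockstep, keeping an offset into the current extent on each side and advancing by the smaller remaining count. A hedged sketch of that two-pointer walk, emitting matched (src, dst) block runs:

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Extent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    // Pair up equal-length runs from two extent lists that should cover the same
    // total number of blocks. Returns false if the totals disagree.
    bool PairExtentRuns(const std::vector<Extent>& src, const std::vector<Extent>& dst,
                        std::vector<std::pair<Extent, Extent>>* out) {
      size_t s = 0, d = 0;
      uint64_t s_off = 0, d_off = 0;
      while (s < src.size() && d < dst.size()) {
        // Skip extents that are empty or already fully consumed.
        if (s_off >= src[s].num_blocks) { s_off = 0; ++s; continue; }
        if (d_off >= dst[d].num_blocks) { d_off = 0; ++d; continue; }
        const uint64_t step = std::min(src[s].num_blocks - s_off,
                                       dst[d].num_blocks - d_off);
        out->push_back({Extent{src[s].start_block + s_off, step},
                        Extent{dst[d].start_block + d_off, step}});
        s_off += step;
        d_off += step;
      }
      // Drain trailing fully-consumed or empty extents on either side.
      while (s < src.size() && s_off >= src[s].num_blocks) { s_off = 0; ++s; }
      while (d < dst.size() && d_off >= dst[d].num_blocks) { d_off = 0; ++d; }
      return s == src.size() && d == dst.size();
    }
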
/system/core/fs_mgr/libsnapshot/snapuserd/user-space-merge/
snapuserd_test.cpp
180 size_t num_blocks = size_ / writer->GetBlockSize(); in CreateCowDevice() local
181 size_t blk_end_copy = num_blocks * 2; in CreateCowDevice()
182 size_t source_blk = num_blocks - 1; in CreateCowDevice()
185 uint32_t sequence[num_blocks * 2]; in CreateCowDevice()
187 for (int i = 0; i < num_blocks; i++) { in CreateCowDevice()
188 sequence[i] = num_blocks - 1 - i; in CreateCowDevice()
191 for (int i = 0; i < num_blocks; i++) { in CreateCowDevice()
192 sequence[num_blocks + i] = 5 * num_blocks - 1 - i; in CreateCowDevice()
194 ASSERT_TRUE(writer->AddSequenceData(2 * num_blocks, sequence)); in CreateCowDevice()
196 size_t x = num_blocks; in CreateCowDevice()
[all …]
merge_worker.cpp
41 size_t num_blocks = 1; in PrepareMerge() local
55 num_blocks = (CowOpCompressionSize(cow_op, BLOCK_SZ) / BLOCK_SZ); in PrepareMerge()
60 num_ops -= num_blocks; in PrepareMerge()
61 nr_consecutive = num_blocks; in PrepareMerge()
76 num_blocks = (CowOpCompressionSize(op, BLOCK_SZ) / BLOCK_SZ); in PrepareMerge()
77 if (num_ops < num_blocks) { in PrepareMerge()
82 num_blocks = 1; in PrepareMerge()
87 nr_consecutive += num_blocks; in PrepareMerge()
88 num_ops -= num_blocks; in PrepareMerge()
/system/chre/apps/nearby/location/lbs/contexthub/nanoapps/nearby/crypto/
hkdf.c
33 const size_t num_blocks = (okmLen + SHA2_HASH_SIZE - 1) / SHA2_HASH_SIZE; in hkdfExpand() local
34 if (num_blocks >= 256u) return; in hkdfExpand()
53 for (size_t i = 0; i < num_blocks; i++) { in hkdfExpand()
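
hkdfExpand derives its output in hash-sized blocks: it needs ceil(okmLen / SHA2_HASH_SIZE) of them, and RFC 5869 caps the counter at 255, which is what the num_blocks >= 256 early return enforces. A small arithmetic sketch, assuming a 32-byte SHA-256 digest:

    #include <cstddef>

    constexpr size_t kHashSize = 32;  // assumed SHA-256 output length in bytes

    // Number of HMAC blocks HKDF-Expand must generate for |okm_len| output bytes:
    // ceil(okm_len / kHashSize). RFC 5869 only allows up to 255 blocks.
    bool HkdfBlockCount(size_t okm_len, size_t* num_blocks) {
      *num_blocks = (okm_len + kHashSize - 1) / kHashSize;
      return *num_blocks <= 255;
    }
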
