# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import itertools
import logging
import os
import shutil
import struct
import zipfile

import ota_metadata_pb2
import common
import fnmatch
from common import (ZipDelete, DoesInputFileContain, ReadBytesFromInputFile, OPTIONS, MakeTempFile,
                    ZipWriteStr, BuildInfo, LoadDictionaryFromFile,
                    SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps,
                    GetRamdiskFormat, ParseUpdateEngineConfig)
import payload_signer
from payload_signer import PayloadSigner, AddSigningArgumentParse, GeneratePayloadProperties


logger = logging.getLogger(__name__)

OPTIONS.no_signing = False
OPTIONS.force_non_ab = False
OPTIONS.wipe_user_data = False
OPTIONS.downgrade = False
OPTIONS.key_passwords = {}
OPTIONS.incremental_source = None
OPTIONS.retrofit_dynamic_partitions = False
OPTIONS.output_metadata_path = None
OPTIONS.boot_variable_file = None

METADATA_NAME = 'META-INF/com/android/metadata'
METADATA_PROTO_NAME = 'META-INF/com/android/metadata.pb'
UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*',
                 'RADIO/*', '*/build.prop', '*/default.prop', '*/build.default', "*/etc/vintf/*"]
SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"
TARGET_FILES_IMAGES_SUBDIR = ["IMAGES", "PREBUILT_IMAGES", "RADIO"]


# Key is the compression algorithm; value is the minimum API level required to
# use that compression algorithm for VABC OTA on a device.
VABC_COMPRESSION_PARAM_SUPPORT = {
    "gz": 31,
    "brotli": 31,
    "none": 31,
    # lz4 support was added in Android U
    "lz4": 34,
    # zstd support was added in Android V
    "zstd": 35,
}
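
# An illustrative sketch (not used elsewhere in this module; the helper name
# is hypothetical): a VABC compression parameter such as "zstd,3" carries the
# algorithm name before the optional ",level" suffix, and that name is what
# gets checked against VABC_COMPRESSION_PARAM_SUPPORT and the device's API
# level.
#
#   def _IsVabcCompressionSupported(param, device_api_level):
#     algo = param.split(",")[0]
#     min_api = VABC_COMPRESSION_PARAM_SUPPORT.get(algo)
#     return min_api is not None and device_api_level >= min_api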


def FinalizeMetadata(metadata, input_file, output_file, needed_property_files=None, package_key=None, pw=None):
  """Finalizes the metadata and signs an A/B OTA package.

  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
  that contains the offsets and sizes for the ZIP entries. An example
  property-files string is as follows.

    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"

  Each token takes the form 'name:offset:size'. The OTA server can pass this
  string, in addition to the package URL, down to the system update client,
  which can then fetch individual ZIP entries (ZIP_STORED) directly at the
  given offsets of the URL.

  Args:
    metadata: The metadata dict for the package.
    input_file: The input ZIP filename that doesn't contain the package
        METADATA entry yet.
    output_file: The final output ZIP filename.
    needed_property_files: The list of PropertyFiles to be generated. Defaults
        to [AbOtaPropertyFiles(), StreamingPropertyFiles()].
    package_key: The key used to sign this OTA package.
    pw: Password for the package_key.
  """
  no_signing = package_key is None

  if needed_property_files is None:
    # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it
    # covers all the info of the latter. However, system updaters and OTA
    # servers need time to switch to the new flag. We keep both flags for the
    # P timeframe, and will remove StreamingPropertyFiles in a later release.
    needed_property_files = (
        AbOtaPropertyFiles(),
        StreamingPropertyFiles(),
    )

  def ComputeAllPropertyFiles(input_file, needed_property_files):
    # Write the current metadata entry with placeholders.
    with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Compute(
            input_zip)

    ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME], True)
    with zipfile.ZipFile(input_file, 'a', allowZip64=True) as output_zip:
      WriteMetadata(metadata, output_zip)

    if no_signing:
      return input_file

    prelim_signing = MakeTempFile(suffix='.zip')
    SignOutput(input_file, prelim_signing, package_key, pw)
    return prelim_signing

  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
    with zipfile.ZipFile(prelim_signing, 'r', allowZip64=True) as prelim_signing_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Finalize(
            prelim_signing_zip,
            len(metadata.property_files[property_files.name]))

  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the
  # ZIP entries, as well as pad the entry headers. We do a preliminary signing
  # (with an incomplete metadata entry) to allow that to happen. Then we
  # compute the ZIP entry offsets, write back the final metadata and do the
  # final signing.
  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
  try:
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
  except PropertyFiles.InsufficientSpaceException:
    # Even with the preliminary signing, the entry orders may change
    # dramatically, which leads to insufficiently reserved space during the
    # first call to ComputeAllPropertyFiles(). In that case, we redo all the
    # preliminary signing work, based on the already ordered ZIP entries, to
    # address the issue.
    prelim_signing = ComputeAllPropertyFiles(
        prelim_signing, needed_property_files)
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)

  # Replace the METADATA entry.
  ZipDelete(prelim_signing, [METADATA_NAME, METADATA_PROTO_NAME])
  with zipfile.ZipFile(prelim_signing, 'a', allowZip64=True) as output_zip:
    WriteMetadata(metadata, output_zip)

  # Re-sign the package after updating the metadata entry.
  if no_signing:
    logger.info(f"Signing disabled for output file {output_file}")
    shutil.copy(prelim_signing, output_file)
  else:
    logger.info(
        f"Signing the output file {output_file} with key {package_key}")
    SignOutput(prelim_signing, output_file, package_key, pw)

  # Reopen the final signed zip to double check the streaming metadata.
  with zipfile.ZipFile(output_file, allowZip64=True) as output_zip:
    for property_files in needed_property_files:
      property_files.Verify(
          output_zip, metadata.property_files[property_files.name].strip())

  # If requested, dump the metadata to a separate file.
  output_metadata_path = OPTIONS.output_metadata_path
  if output_metadata_path:
    WriteMetadata(metadata, output_metadata_path)


def WriteMetadata(metadata_proto, output):
  """Writes the metadata to the zip archive or a file.

  Args:
    metadata_proto: The metadata protobuf for the package.
    output: A ZipFile object or a string of the output file path. If a string
      path is given, the metadata in the protobuf format will be written to
      {output}.pb, e.g. ota_metadata.pb
  """

  metadata_dict = BuildLegacyOtaMetadata(metadata_proto)
  legacy_metadata = "".join(["%s=%s\n" % kv for kv in
                             sorted(metadata_dict.items())])
  if isinstance(output, zipfile.ZipFile):
    ZipWriteStr(output, METADATA_PROTO_NAME, metadata_proto.SerializeToString(),
                compress_type=zipfile.ZIP_STORED)
    ZipWriteStr(output, METADATA_NAME, legacy_metadata,
                compress_type=zipfile.ZIP_STORED)
    return

  with open('{}.pb'.format(output), 'wb') as f:
    f.write(metadata_proto.SerializeToString())
  with open(output, 'w') as f:
    f.write(legacy_metadata)
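

# Both call modes of WriteMetadata(), sketched (paths are hypothetical):
#
#   with zipfile.ZipFile(ota_path, 'a', allowZip64=True) as zf:
#     WriteMetadata(metadata_proto, zf)  # writes the two ZIP entries
#   WriteMetadata(metadata_proto, "/tmp/metadata")  # writes /tmp/metadata
#                                                   # and /tmp/metadata.pb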


def UpdateDeviceState(device_state, build_info, boot_variable_values,
                      is_post_build):
  """Update the fields of the DeviceState proto with build info."""

  def UpdatePartitionStates(partition_states):
    """Update the per-partition state according to its build.prop"""
    if not build_info.is_ab:
      return
    build_info_set = ComputeRuntimeBuildInfos(build_info,
                                              boot_variable_values)
    assert "ab_partitions" in build_info.info_dict, \
        "ab_partitions property required for ab update."
    ab_partitions = set(build_info.info_dict.get("ab_partitions"))

    # delta_generator will error out on unused timestamps,
    # so only generate timestamps for dynamic partitions
    # used in OTA update.
    for partition in sorted(set(PARTITIONS_WITH_BUILD_PROP) & ab_partitions):
      partition_prop = build_info.info_dict.get(
          '{}.build.prop'.format(partition))
      # Skip if the partition is missing, or it doesn't have a build.prop
      if not partition_prop or not partition_prop.build_props:
        continue

      partition_state = partition_states.add()
      partition_state.partition_name = partition
      # Update the partition's runtime device names and fingerprints
      partition_devices = set()
      partition_fingerprints = set()
      for runtime_build_info in build_info_set:
        partition_devices.add(
            runtime_build_info.GetPartitionBuildProp('ro.product.device',
                                                     partition))
        partition_fingerprints.add(
            runtime_build_info.GetPartitionFingerprint(partition))

      partition_state.device.extend(sorted(partition_devices))
      partition_state.build.extend(sorted(partition_fingerprints))

      # TODO(xunchang) set the boot image's version with kmi. Note the boot
      # image doesn't have a file map.
      partition_state.version = build_info.GetPartitionBuildProp(
          'ro.build.date.utc', partition)

  # TODO(xunchang) we can save a call to ComputeRuntimeBuildInfos.
  build_devices, build_fingerprints = \
      CalculateRuntimeDevicesAndFingerprints(build_info, boot_variable_values)
  device_state.device.extend(sorted(build_devices))
  device_state.build.extend(sorted(build_fingerprints))
  device_state.build_incremental = build_info.GetBuildProp(
      'ro.build.version.incremental')

  UpdatePartitionStates(device_state.partition_state)

  if is_post_build:
    device_state.sdk_level = build_info.GetBuildProp(
        'ro.build.version.sdk')
    device_state.security_patch_level = build_info.GetBuildProp(
        'ro.build.version.security_patch')
    # Use the actual post-timestamp, even for a downgrade case.
    device_state.timestamp = int(build_info.GetBuildProp('ro.build.date.utc'))


def GetPackageMetadata(target_info, source_info=None):
  """Generates and returns the metadata proto.

  It generates an ota_metadata protobuf that contains the info to be written
  into an OTA package (META-INF/com/android/metadata.pb). It also handles the
  detection of downgrade / data wipe based on the global options.

  Args:
    target_info: The BuildInfo instance that holds the target build info.
    source_info: The BuildInfo instance that holds the source build info, or
        None if generating a full OTA.

  Returns:
    A protobuf to be written into the package metadata entry.
  """
  assert isinstance(target_info, BuildInfo)
  assert source_info is None or isinstance(source_info, BuildInfo)

  boot_variable_values = {}
  if OPTIONS.boot_variable_file:
    d = LoadDictionaryFromFile(OPTIONS.boot_variable_file)
    for key, values in d.items():
      boot_variable_values[key] = [val.strip() for val in values.split(',')]

  metadata_proto = ota_metadata_pb2.OtaMetadata()
  # TODO(xunchang) some fields, e.g. post-device, aren't necessary. We can
  # consider skipping them if they aren't used by clients.
  UpdateDeviceState(metadata_proto.postcondition, target_info,
                    boot_variable_values, True)

  if target_info.is_ab and not OPTIONS.force_non_ab:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.AB
    metadata_proto.required_cache = 0
  else:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.BLOCK
    # The cache requirement will be updated by the non-A/B code.

  if OPTIONS.wipe_user_data:
    metadata_proto.wipe = True

  if OPTIONS.retrofit_dynamic_partitions:
    metadata_proto.retrofit_dynamic_partitions = True

  is_incremental = source_info is not None
  if is_incremental:
    UpdateDeviceState(metadata_proto.precondition, source_info,
                      boot_variable_values, False)
  else:
    metadata_proto.precondition.device.extend(
        metadata_proto.postcondition.device)

  # Detect downgrades and set up downgrade flags accordingly.
  if is_incremental:
    HandleDowngradeMetadata(metadata_proto, target_info, source_info)

  return metadata_proto


def BuildLegacyOtaMetadata(metadata_proto):
  """Converts the metadata proto to a legacy metadata dict.

  This metadata dict is used to build the legacy metadata text file for
  backward compatibility. We won't add new keys to the legacy metadata format.
  If new information is needed, we should add it as a new field in the
  OtaMetadata proto definition.
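
  For illustration only (hypothetical values), the resulting text entry for an
  A/B full OTA is a sorted list of key=value lines such as:

    ota-required-cache=0
    ota-type=AB
    post-build=google/dev/dev:14/BUILD/1:user/release-keys
    post-build-incremental=1
    post-sdk-level=34
    post-security-patch-level=2023-12-05
    post-timestamp=1700000000
    pre-device=dev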
  """

  separator = '|'

  metadata_dict = {}
  if metadata_proto.type == ota_metadata_pb2.OtaMetadata.AB:
    metadata_dict['ota-type'] = 'AB'
  elif metadata_proto.type == ota_metadata_pb2.OtaMetadata.BLOCK:
    metadata_dict['ota-type'] = 'BLOCK'
  if metadata_proto.wipe:
    metadata_dict['ota-wipe'] = 'yes'
  if metadata_proto.retrofit_dynamic_partitions:
    metadata_dict['ota-retrofit-dynamic-partitions'] = 'yes'
  if metadata_proto.downgrade:
    metadata_dict['ota-downgrade'] = 'yes'

  metadata_dict['ota-required-cache'] = str(metadata_proto.required_cache)

  post_build = metadata_proto.postcondition
  metadata_dict['post-build'] = separator.join(post_build.build)
  metadata_dict['post-build-incremental'] = post_build.build_incremental
  metadata_dict['post-sdk-level'] = post_build.sdk_level
  metadata_dict['post-security-patch-level'] = post_build.security_patch_level
  metadata_dict['post-timestamp'] = str(post_build.timestamp)

  pre_build = metadata_proto.precondition
  metadata_dict['pre-device'] = separator.join(pre_build.device)
  # incremental updates
  if len(pre_build.build) != 0:
    metadata_dict['pre-build'] = separator.join(pre_build.build)
    metadata_dict['pre-build-incremental'] = pre_build.build_incremental

  if metadata_proto.spl_downgrade:
    metadata_dict['spl-downgrade'] = 'yes'
  metadata_dict.update(metadata_proto.property_files)

  return metadata_dict


def HandleDowngradeMetadata(metadata_proto, target_info, source_info):
  # Only incremental OTAs are allowed to reach here.
  assert OPTIONS.incremental_source is not None

  # Used for logging upon errors.
  log_downgrades = []
  log_upgrades = []

  post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
  pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
  if int(post_timestamp) < int(pre_timestamp):
    logger.info(f"ro.build.date.utc pre timestamp: {pre_timestamp}, "
                f"post timestamp: {post_timestamp}. Downgrade detected.")
    log_downgrades.append(
        f"ro.build.date.utc pre: {pre_timestamp} post: {post_timestamp}")
  else:
    logger.info(f"ro.build.date.utc pre timestamp: {pre_timestamp}, "
                f"post timestamp: {post_timestamp}.")
    log_upgrades.append(
        f"ro.build.date.utc pre: {pre_timestamp} post: {post_timestamp}")

  # When merging system and vendor target files, it is not enough to check
  # ro.build.date.utc; the timestamp of each partition must be checked as
  # well.
  if source_info.is_ab:
    ab_partitions = set(source_info.get("ab_partitions"))
    for partition in sorted(set(PARTITIONS_WITH_BUILD_PROP) & ab_partitions):
      partition_prop = source_info.get('{}.build.prop'.format(partition))
      # Skip if the partition is missing, or it doesn't have a build.prop.
      if not partition_prop or not partition_prop.build_props:
        continue
      partition_prop = target_info.get('{}.build.prop'.format(partition))
      # Skip if the partition is missing, or it doesn't have a build.prop.
      if not partition_prop or not partition_prop.build_props:
        continue

      post_timestamp = target_info.GetPartitionBuildProp(
          'ro.build.date.utc', partition)
      pre_timestamp = source_info.GetPartitionBuildProp(
          'ro.build.date.utc', partition)
      if int(post_timestamp) < int(pre_timestamp):
        logger.info(f"Partition {partition} pre timestamp: {pre_timestamp}, "
                    f"post timestamp: {post_timestamp}. Downgrade detected.")
        log_downgrades.append(
            f"{partition} pre: {pre_timestamp} post: {post_timestamp}")
      else:
        logger.info(f"Partition {partition} pre timestamp: {pre_timestamp}, "
                    f"post timestamp: {post_timestamp}.")
        log_upgrades.append(
            f"{partition} pre: {pre_timestamp} post: {post_timestamp}")

  if OPTIONS.spl_downgrade:
    metadata_proto.spl_downgrade = True

  if OPTIONS.downgrade:
    if len(log_downgrades) == 0:
      raise RuntimeError(
          "--downgrade or --override_timestamp specified but no downgrade "
          "detected. Current values for ro.build.date.utc: "
          + ', '.join(log_upgrades))
    metadata_proto.downgrade = True
  else:
    if len(log_downgrades) != 0:
      raise RuntimeError(
          "Downgrade detected based on timestamp check in ro.build.date.utc. "
          "Need to specify --override_timestamp OR --downgrade to allow "
          "building the incremental. Downgrades detected for: "
          + ', '.join(log_downgrades))


def ComputeRuntimeBuildInfos(default_build_info, boot_variable_values):
  """Returns a set of build info objects that may exist during runtime."""

  build_info_set = {default_build_info}
  if not boot_variable_values:
    return build_info_set

  # Calculate all possible combinations of the values for the boot variables.
  keys = boot_variable_values.keys()
  value_list = boot_variable_values.values()
  combinations = [dict(zip(keys, values))
                  for values in itertools.product(*value_list)]
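  # For example (hypothetical values), boot_variable_values of
  # {"ro.boot.sku": ["a", "b"], "ro.boot.region": ["us", "eu"]} yields four
  # combinations: (a, us), (a, eu), (b, us), (b, eu).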
  for placeholder_values in combinations:
    # Reload the info_dict as some build properties may change their values
    # based on the value of ro.boot* properties.
    info_dict = copy.deepcopy(default_build_info.info_dict)
    for partition in PARTITIONS_WITH_BUILD_PROP:
      partition_prop_key = "{}.build.prop".format(partition)
      input_file = info_dict[partition_prop_key].input_file
      ramdisk = GetRamdiskFormat(info_dict)
      if isinstance(input_file, zipfile.ZipFile):
        with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip:
          info_dict[partition_prop_key] = \
              PartitionBuildProps.FromInputFile(input_zip, partition,
                                                placeholder_values,
                                                ramdisk)
      else:
        info_dict[partition_prop_key] = \
            PartitionBuildProps.FromInputFile(input_file, partition,
                                              placeholder_values,
                                              ramdisk)
    info_dict["build.prop"] = info_dict["system.build.prop"]
    build_info_set.add(BuildInfo(info_dict, default_build_info.oem_dicts))

  return build_info_set


def CalculateRuntimeDevicesAndFingerprints(default_build_info,
                                           boot_variable_values):
  """Returns a tuple of sets for runtime devices and fingerprints."""

  device_names = set()
  fingerprints = set()
  build_info_set = ComputeRuntimeBuildInfos(default_build_info,
                                            boot_variable_values)
  for runtime_build_info in build_info_set:
    device_names.add(runtime_build_info.device)
    fingerprints.add(runtime_build_info.fingerprint)
  return device_names, fingerprints


def GetZipEntryOffset(zfp, entry_info):
  """Gets the offset of the beginning of a particular zip entry.

  Args:
    zfp: zipfile.ZipFile
    entry_info: zipfile.ZipInfo

  Returns:
    (offset, size) tuple
  """
  # Don't use len(entry_info.extra), because that returns the size of the
  # extra fields in the central directory. We need to look at the local file
  # header, as the two might have different sizes.

  # We cannot work with the zipfile.ZipFile instance directly; we need the
  # |fp| of the underlying file.
  zfp = zfp.fp
  zfp.seek(entry_info.header_offset)
  data = zfp.read(zipfile.sizeFileHeader)
  fheader = struct.unpack(zipfile.structFileHeader, data)
  # The last two fields of the local file header are the filename length and
  # the extra length.
  filename_len = fheader[-2]
  extra_len = fheader[-1]
  offset = entry_info.header_offset
  offset += zipfile.sizeFileHeader
  offset += filename_len + extra_len
  size = entry_info.file_size
  return (offset, size)
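

# A minimal usage sketch of GetZipEntryOffset(), assuming a locally available
# package "ota.zip" (hypothetical name). A streaming client would issue the
# equivalent reads as HTTP range requests against the package URL.
#
#   with zipfile.ZipFile("ota.zip", allowZip64=True) as zf:
#     offset, size = GetZipEntryOffset(zf, zf.getinfo("payload.bin"))
#     zf.fp.seek(offset)
#     payload_bytes = zf.fp.read(size)  # valid because the entry is ZIP_STORED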


class PropertyFiles(object):
  """A class that computes the property-files string for an OTA package.

  A property-files string is a comma-separated string that contains the
  offset/size info for an OTA package. The entries, which must be ZIP_STORED,
  can be fetched directly with the package URL along with the offset/size info.
  These strings can be used for streaming A/B OTAs, or for allowing an updater
  to download the package metadata entry directly, without paying the cost of
  downloading the entire package.

  Computing the final property-files string requires two passes, because
  signing the whole package (with signapk.jar) will possibly reorder the ZIP
  entries, which may in turn invalidate the earlier computed ZIP entry
  offset/size values.

  This class provides functions to be called for each pass. The general flow is
  as follows.

    property_files = PropertyFiles()
    # The first pass, which writes placeholders before doing the initial
    # signing.
    property_files.Compute()
    SignOutput()

    # The second pass, which replaces the placeholders with actual data.
    property_files.Finalize()
    SignOutput()

  And the caller can additionally verify the final result.

    property_files.Verify()
  """

  def __init__(self):
    self.name = None
    self.required = ()
    self.optional = ()

  def Compute(self, input_zip):
    """Computes and returns a property-files string with placeholders.

    We reserve extra space for the offset and size of the metadata entry itself,
    although we don't know the final values until the package gets signed.

    Args:
      input_zip: The input ZIP file.

    Returns:
      A string with placeholders for the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:        ".
    """
    return self.GetPropertyFilesString(input_zip, reserve_space=True)

  class InsufficientSpaceException(Exception):
    pass

  def Finalize(self, input_zip, reserved_length):
    """Finalizes a property-files string with actual METADATA offset/size info.

    The input ZIP file has been signed, with the ZIP entries in the desired
    place (signapk.jar will possibly reorder the ZIP entries). Now we compute
    the ZIP entry offsets and construct the property-files string with actual
    data. Note that during this process, we must pad the property-files string
    to the reserved length, so that the METADATA entry size remains the same.
    Otherwise the entries' offsets and sizes may change again.

    Args:
      input_zip: The input ZIP file.
      reserved_length: The reserved length of the property-files string during
          the call to Compute(). The final string must be no more than this
          size.

    Returns:
      A property-files string including the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379  ".

    Raises:
      InsufficientSpaceException: If the reserved length is insufficient to hold
          the final string.
    """
    result = self.GetPropertyFilesString(input_zip, reserve_space=False)
    if len(result) > reserved_length:
      raise self.InsufficientSpaceException(
          'Insufficient reserved space: reserved={}, actual={}'.format(
              reserved_length, len(result)))

    result += ' ' * (reserved_length - len(result))
    return result

  def Verify(self, input_zip, expected):
    """Verifies the input ZIP file contains the expected property-files string.

    Args:
      input_zip: The input ZIP file.
      expected: The property-files string that's computed from Finalize().

    Raises:
      AssertionError: On finding a mismatch.
    """
    actual = self.GetPropertyFilesString(input_zip)
    assert actual == expected, \
        "Mismatching streaming metadata: {} vs {}.".format(actual, expected)

  def GetPropertyFilesString(self, zip_file, reserve_space=False):
    """Constructs the property-files string per request.

    Args:
      zip_file: The input ZIP file.
      reserve_space: Whether to reserve placeholder space for the metadata
          entry's offset/size info, instead of filling in the actual values.

    Returns:
      A property-files string including the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:     ".
    """

    def ComputeEntryOffsetSize(name):
      """Computes the zip entry offset and size."""
      info = zip_file.getinfo(name)
      (offset, size) = GetZipEntryOffset(zip_file, info)
      return '%s:%d:%d' % (os.path.basename(name), offset, size)

    tokens = []
    tokens.extend(self._GetPrecomputed(zip_file))
    for entry in self.required:
      tokens.append(ComputeEntryOffsetSize(entry))
    for entry in self.optional:
      if entry in zip_file.namelist():
        tokens.append(ComputeEntryOffsetSize(entry))

    # 'META-INF/com/android/metadata' is required. We don't know its actual
    # offset and length (nor the values for the other entries) yet. So we
    # reserve 15 bytes as a placeholder ('offset:length'), which is sufficient
    # to cover the space for the metadata entry: 'offset' allows a max of 10
    # digits (i.e. ~9 GiB), and 4 digits suffice for the length. Note that all
    # the reserved space serves the metadata entry only.
    if reserve_space:
      tokens.append('metadata:' + ' ' * 15)
      tokens.append('metadata.pb:' + ' ' * 15)
    else:
      tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
      if METADATA_PROTO_NAME in zip_file.namelist():
        tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))

    return ','.join(tokens)

  def _GetPrecomputed(self, input_zip):
    """Computes the additional tokens to be included into the property-files.

    This applies to tokens without actual ZIP entries, such as
    payload_metadata.bin. We want to expose the offset/size to updaters, so
    that they can download the payload metadata directly with the info.

    Args:
      input_zip: The input zip file.

    Returns:
      A list of strings (tokens) to be added to the property-files string.
    """
    # pylint: disable=no-self-use
    # pylint: disable=unused-argument
    return []


def SignOutput(temp_zip_name, output_zip_name, package_key=None, pw=None):
  if package_key is None:
    package_key = OPTIONS.package_key
  if pw is None and OPTIONS.key_passwords:
    pw = OPTIONS.key_passwords[package_key]

  SignFile(temp_zip_name, output_zip_name, package_key, pw,
           whole_file=True)


def ConstructOtaApexInfo(target_zip, source_file=None):
  """If applicable, add the source version to the apex info."""

  def _ReadApexInfo(input_zip):
    if not DoesInputFileContain(input_zip, "META/apex_info.pb"):
      logger.warning("target_file doesn't contain apex_info.pb %s", input_zip)
      return None
    return ReadBytesFromInputFile(input_zip, "META/apex_info.pb")

  target_apex_string = _ReadApexInfo(target_zip)
  # Return early if the target apex info doesn't exist or is empty.
  if not target_apex_string:
    return target_apex_string

  # If the source apex info isn't available, just return the target info.
  if not source_file:
    return target_apex_string

  source_apex_string = _ReadApexInfo(source_file)
  if not source_apex_string:
    return target_apex_string

  source_apex_proto = ota_metadata_pb2.ApexMetadata()
  source_apex_proto.ParseFromString(source_apex_string)
  source_apex_versions = {apex.package_name: apex.version for apex in
                          source_apex_proto.apex_info}

  # If the apex package is available in the source build, initialize the source
  # apex version.
  target_apex_proto = ota_metadata_pb2.ApexMetadata()
  target_apex_proto.ParseFromString(target_apex_string)
  for target_apex in target_apex_proto.apex_info:
    name = target_apex.package_name
    if name in source_apex_versions:
      target_apex.source_version = source_apex_versions[name]

  return target_apex_proto.SerializeToString()


def IsLz4diffCompatible(source_file: str, target_file: str):
  """Checks whether the lz4diff versions in two builds are compatible.

  Args:
    source_file: Path to the source build's target_files.zip
    target_file: Path to the target build's target_files.zip

  Returns:
    True if and only if the lz4diff versions are compatible.
  """
  if source_file is None or target_file is None:
    return False
  # Right now we enable lz4diff as long as the source build has liblz4.so.
  # In the future we might introduce a versioning system for lz4diff as well.
  if zipfile.is_zipfile(source_file):
    with zipfile.ZipFile(source_file, "r") as zfp:
      return "META/liblz4.so" in zfp.namelist()
  else:
    assert os.path.isdir(source_file)
    return os.path.exists(os.path.join(source_file, "META", "liblz4.so"))


def IsZucchiniCompatible(source_file: str, target_file: str):
  """Checks whether the zucchini versions in two builds are compatible.

  Args:
    source_file: Path to the source build's target_files.zip
    target_file: Path to the target build's target_files.zip

  Returns:
    True if and only if the zucchini versions are compatible.
  """
  if source_file is None or target_file is None:
    return False
  assert os.path.exists(source_file)
  assert os.path.exists(target_file)

  assert zipfile.is_zipfile(source_file) or os.path.isdir(source_file)
  assert zipfile.is_zipfile(target_file) or os.path.isdir(target_file)
  _ZUCCHINI_CONFIG_ENTRY_NAME = "META/zucchini_config.txt"

  def ReadEntry(path, entry):
    # Read an entry inside a .zip file or an extracted dir of a .zip file.
    if zipfile.is_zipfile(path):
      with zipfile.ZipFile(path, "r", allowZip64=True) as zfp:
        if entry in zfp.namelist():
          return zfp.read(entry).decode()
    else:
      entry_path = os.path.join(path, entry)
      if os.path.exists(entry_path):
        with open(entry_path, "r") as fp:
          return fp.read()
    return False

  source_entry = ReadEntry(source_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
  target_entry = ReadEntry(target_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
  return source_entry and target_entry and source_entry == target_entry


def ExtractTargetFiles(path: str):
  if os.path.isdir(path):
    logger.info("target files %s is already extracted", path)
    return path
  extracted_dir = common.MakeTempDir("target_files")
  logger.info(f"Extracting target files {path} to {extracted_dir}")
  common.UnzipToDir(path, extracted_dir, UNZIP_PATTERN + [""])
  for subdir in TARGET_FILES_IMAGES_SUBDIR:
    image_dir = os.path.join(extracted_dir, subdir)
    if not os.path.exists(image_dir):
      continue
    for filename in os.listdir(image_dir):
      if not filename.endswith(".img"):
        continue
      common.UnsparseImage(os.path.join(image_dir, filename))

  return extracted_dir


def LocatePartitionPath(target_files_dir: str, partition: str, allow_empty):
  for subdir in TARGET_FILES_IMAGES_SUBDIR:
    path = os.path.join(target_files_dir, subdir, partition + ".img")
    if os.path.exists(path):
      return path
  if allow_empty:
    return ""
  raise common.ExternalError(
      "Partition {} not found in target files {}".format(partition, target_files_dir))


def GetPartitionImages(target_files_dir: str, ab_partitions, allow_empty=True):
  assert os.path.isdir(target_files_dir)
  return ":".join([LocatePartitionPath(target_files_dir, partition, allow_empty) for partition in ab_partitions])


def LocatePartitionMap(target_files_dir: str, partition: str):
  for subdir in TARGET_FILES_IMAGES_SUBDIR:
    path = os.path.join(target_files_dir, subdir, partition + ".map")
    if os.path.exists(path):
      return path
  return ""


def GetPartitionMaps(target_files_dir: str, ab_partitions):
  assert os.path.isdir(target_files_dir)
  return ":".join([LocatePartitionMap(target_files_dir, partition) for partition in ab_partitions])
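

# Shape of the joined lists, sketched with hypothetical paths: for
# ab_partitions ["system", "vendor"], GetPartitionImages() may return
# "/tmp/tf/IMAGES/system.img:/tmp/tf/IMAGES/vendor.img", the colon-separated
# format that delta_generator expects for --new_partitions/--old_partitions
# (and GetPartitionMaps() likewise for --new_mapfiles/--old_mapfiles).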


class PayloadGenerator(object):
  """Manages the creation and the signing of an A/B OTA Payload."""

  PAYLOAD_BIN = payload_signer.PAYLOAD_BIN
  PAYLOAD_PROPERTIES_TXT = payload_signer.PAYLOAD_PROPERTIES_TXT
  SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
  SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'

  def __init__(self, secondary=False, wipe_user_data=False, minor_version=None, is_partial_update=False, spl_downgrade=False):
    """Initializes a Payload instance.

    Args:
      secondary: Whether it's generating a secondary payload (default: False).
      wipe_user_data: Whether the payload should wipe user data (adds
          POWERWASH=1 to the payload properties).
      minor_version: If set, overrides the payload minor version passed to
          delta_generator.
      is_partial_update: Whether the payload updates only a subset of the
          partitions.
      spl_downgrade: Whether the update downgrades the security patch level
          (adds SPL_DOWNGRADE=1 to the payload properties).
    """
    self.payload_file = None
    self.payload_properties = None
    self.secondary = secondary
    self.wipe_user_data = wipe_user_data
    self.minor_version = minor_version
    self.is_partial_update = is_partial_update
    self.spl_downgrade = spl_downgrade

  def _Run(self, cmd):  # pylint: disable=no-self-use
    # Don't pipe (buffer) the output if verbose is set. Let the invoked tool
    # (e.g. delta_generator) write to stdout/stderr directly, so its progress
    # can be monitored.
    if OPTIONS.verbose:
      common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
    else:
      common.RunAndCheckOutput(cmd)

  def Generate(self, target_file, source_file=None, additional_args=None):
    """Generates a payload from the given target-files zip(s).

    Args:
      target_file: The filename of the target build's target-files zip.
      source_file: The filename of the source build's target-files zip; or
          None if generating a full OTA.
      additional_args: A list of additional args that should be passed to the
          delta_generator binary; or None.
    """
    if additional_args is None:
      additional_args = []

    payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
    target_dir = ExtractTargetFiles(target_file)
    cmd = ["delta_generator",
           "--out_file", payload_file]
    with open(os.path.join(target_dir, "META", "ab_partitions.txt"), "r") as fp:
      ab_partitions = fp.read().strip().splitlines()
    cmd.extend(["--partition_names", ":".join(ab_partitions)])
    cmd.extend(
        ["--new_partitions", GetPartitionImages(target_dir, ab_partitions, False)])
    cmd.extend(
        ["--new_mapfiles", GetPartitionMaps(target_dir, ab_partitions)])
    if source_file is not None:
      source_dir = ExtractTargetFiles(source_file)
      cmd.extend(
          ["--old_partitions", GetPartitionImages(source_dir, ab_partitions, True)])
      cmd.extend(
          ["--old_mapfiles", GetPartitionMaps(source_dir, ab_partitions)])

      if OPTIONS.disable_fec_computation:
        cmd.extend(["--disable_fec_computation=true"])
      if OPTIONS.disable_verity_computation:
        cmd.extend(["--disable_verity_computation=true"])
    postinstall_config = os.path.join(
        target_dir, "META", "postinstall_config.txt")
    if os.path.exists(postinstall_config):
      cmd.extend(["--new_postinstall_config_file", postinstall_config])
    dynamic_partition_info = os.path.join(
        target_dir, "META", "dynamic_partitions_info.txt")
    if os.path.exists(dynamic_partition_info):
      cmd.extend(["--dynamic_partition_info_file", dynamic_partition_info])

    apex_info = os.path.join(
        target_dir, "META", "apex_info.pb")
    if os.path.exists(apex_info):
      cmd.extend(["--apex_info_file", apex_info])

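    # Read the payload version from the target build by default; for an
    # incremental OTA, the source build's update_engine_config.txt takes
    # precedence below, since the older source build is what will apply this
    # payload.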
    major_version, minor_version = ParseUpdateEngineConfig(
        os.path.join(target_dir, "META", "update_engine_config.txt"))
    if source_file:
      major_version, minor_version = ParseUpdateEngineConfig(
          os.path.join(source_dir, "META", "update_engine_config.txt"))
    if self.minor_version:
      minor_version = self.minor_version
    cmd.extend(["--major_version", str(major_version)])
    if source_file is not None or self.is_partial_update:
      cmd.extend(["--minor_version", str(minor_version)])
    if self.is_partial_update:
      cmd.extend(["--is_partial_update=true"])
    cmd.extend(additional_args)
    self._Run(cmd)

    self.payload_file = payload_file
    self.payload_properties = None

  def Sign(self, payload_signer):
    """Generates and signs the hashes of the payload and metadata.

    Args:
      payload_signer: A PayloadSigner() instance that serves the signing work.

    Raises:
      AssertionError: On any failure when signing the payload.
    """
    assert isinstance(payload_signer, PayloadSigner)

    signed_payload_file = payload_signer.SignPayload(self.payload_file)

    self.payload_file = signed_payload_file

  def WriteToZip(self, output_zip):
    """Writes the payload to the given zip.

    Args:
      output_zip: The output ZipFile instance.
    """
    assert self.payload_file is not None
    # Dump the signed payload properties.
    properties_file = GeneratePayloadProperties(self.payload_file)
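    # properties_file now contains the standard payload properties (typically
    # FILE_HASH, FILE_SIZE, METADATA_HASH and METADATA_SIZE as key=value
    # lines); the optional flags below are appended on top.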

    with open(properties_file, "a") as f:
      if self.wipe_user_data:
        f.write("POWERWASH=1\n")
      if self.secondary:
        f.write("SWITCH_SLOT_ON_REBOOT=0\n")
      if self.spl_downgrade:
        f.write("SPL_DOWNGRADE=1\n")

    self.payload_properties = properties_file

    if self.secondary:
      payload_arcname = PayloadGenerator.SECONDARY_PAYLOAD_BIN
      payload_properties_arcname = PayloadGenerator.SECONDARY_PAYLOAD_PROPERTIES_TXT
    else:
      payload_arcname = PayloadGenerator.PAYLOAD_BIN
      payload_properties_arcname = PayloadGenerator.PAYLOAD_PROPERTIES_TXT

    # Add the signed payload file and properties into the zip. In order to
    # support streaming, we pack them as ZIP_STORED, so these entries can be
    # read directly with the offset and length pairs.
    common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname,
                    compress_type=zipfile.ZIP_STORED)
    common.ZipWrite(output_zip, self.payload_properties,
                    arcname=payload_properties_arcname,
                    compress_type=zipfile.ZIP_STORED)


class StreamingPropertyFiles(PropertyFiles):
  """A subclass for computing the property-files for streaming A/B OTAs."""

  def __init__(self):
    super(StreamingPropertyFiles, self).__init__()
    self.name = 'ota-streaming-property-files'
    self.required = (
        # payload.bin and payload_properties.txt must exist.
        'payload.bin',
        'payload_properties.txt',
    )
    self.optional = (
        # apex_info.pb isn't directly used in the update flow
        'apex_info.pb',
        # care_map is available only if dm-verity is enabled.
        'care_map.pb',
        'care_map.txt',
        # compatibility.zip is available only if target supports Treble.
        'compatibility.zip',
    )


class AbOtaPropertyFiles(StreamingPropertyFiles):
  """The property-files for A/B OTA that includes payload_metadata.bin info.

  Since P, we expose one more token (aka property-file), in addition to the ones
  for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
  'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
  doesn't exist as a separate ZIP entry, but can be used to verify if the
  payload can be applied on the given device.

  For backward compatibility, we keep both of the 'ota-streaming-property-files'
  and the newly added 'ota-property-files' in P. The new token will only be
  available in 'ota-property-files'.
  """

  def __init__(self):
    super(AbOtaPropertyFiles, self).__init__()
    self.name = 'ota-property-files'

  def _GetPrecomputed(self, input_zip):
    offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
    return ['payload_metadata.bin:{}:{}'.format(offset, size)]

  @staticmethod
  def _GetPayloadMetadataOffsetAndSize(input_zip):
    """Computes the offset and size of the payload metadata for a given package.

    (From system/update_engine/update_metadata.proto)
    A delta update file contains all the deltas needed to update a system from
    one specific version to another specific version. The update format is
    represented by this struct pseudocode:

    struct delta_update_file {
      char magic[4] = "CrAU";
      uint64 file_format_version;
      uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest

      // Only present if format_version > 1:
      uint32 metadata_signature_size;

      // The Bzip2 compressed DeltaArchiveManifest
      char manifest[manifest_size];

      // The signature of the metadata (from the beginning of the payload up to
      // this location, not including the signature itself). This is a
      // serialized Signatures message.
      char metadata_signature_message[metadata_signature_size];

      // Data blobs for files, no specific format. The specific offset
      // and length of each data blob is recorded in the DeltaArchiveManifest.
      struct {
        char data[];
      } blobs[];

      // These two are not signed:
      uint64 payload_signatures_message_size;
      char payload_signatures_message[];
    };

    'payload_metadata.bin' contains all the bytes from the beginning of the
    payload through the end of 'metadata_signature_message'.
    """
    payload_info = input_zip.getinfo('payload.bin')
    (payload_offset, payload_size) = GetZipEntryOffset(input_zip, payload_info)

    # Read the underlying raw zipfile at the specified offset.
    payload_fp = input_zip.fp
    payload_fp.seek(payload_offset)
    header_bin = payload_fp.read(24)

    # Network byte order (big-endian).
    header = struct.unpack("!IQQL", header_bin)
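    # The unpacked fields, in order: magic, file_format_version,
    # manifest_size, metadata_signature_size (see the struct pseudocode
    # above).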

    # 'CrAU'
    magic = header[0]
    assert magic == 0x43724155, "Invalid magic: {:x}, computed offset {}" \
        .format(magic, payload_offset)

    manifest_size = header[2]
    metadata_signature_size = header[3]
    metadata_total = 24 + manifest_size + metadata_signature_size
    assert metadata_total <= payload_size

    return (payload_offset, metadata_total)


def Fnmatch(filename, patterns):
  """Returns True if filename matches any of the given glob patterns."""
  return any(fnmatch.fnmatch(filename, pat) for pat in patterns)


def CopyTargetFilesDir(input_dir):
  output_dir = common.MakeTempDir("target_files")

  def SymlinkIfNotSparse(src, dst):
    if common.IsSparseImage(src):
      return common.UnsparseImage(src, dst)
    else:
      return os.symlink(os.path.realpath(src), dst)

  for subdir in TARGET_FILES_IMAGES_SUBDIR:
    if not os.path.exists(os.path.join(input_dir, subdir)):
      continue
    shutil.copytree(os.path.join(input_dir, subdir), os.path.join(
        output_dir, subdir), dirs_exist_ok=True, copy_function=SymlinkIfNotSparse)
  shutil.copytree(os.path.join(input_dir, "META"), os.path.join(
      output_dir, "META"), dirs_exist_ok=True)

  for (dirpath, _, filenames) in os.walk(input_dir):
    for filename in filenames:
      path = os.path.join(dirpath, filename)
      relative_path = path.removeprefix(input_dir).removeprefix("/")
      if not Fnmatch(relative_path, UNZIP_PATTERN):
        continue
      target_path = os.path.join(output_dir, relative_path)
      if os.path.exists(target_path):
        continue
      os.makedirs(os.path.dirname(target_path), exist_ok=True)
      shutil.copy(path, target_path)
  return output_dir