/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.server.healthconnect.backuprestore;

import static android.health.connect.Constants.DEFAULT_INT;
import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_FETCHING_DATA;
import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_NONE;
import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_UNKNOWN;
import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_VERSION_DIFF;
import static android.health.connect.HealthConnectDataState.RESTORE_STATE_IDLE;
import static android.health.connect.HealthConnectDataState.RESTORE_STATE_IN_PROGRESS;
import static android.health.connect.HealthConnectDataState.RESTORE_STATE_PENDING;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_COMPLETE;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_FAILED;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_RETRY;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_STARTED;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_STATE_UNKNOWN;

import static com.android.server.healthconnect.backuprestore.BackupRestore.BackupRestoreJobService.EXTRA_JOB_NAME_KEY;
import static com.android.server.healthconnect.backuprestore.BackupRestore.BackupRestoreJobService.EXTRA_USER_ID;

import static java.util.Objects.requireNonNull;

import android.annotation.IntDef;
import android.annotation.NonNull;
import android.app.job.JobInfo;
import android.app.job.JobParameters;
import android.app.job.JobScheduler;
import android.app.job.JobService;
import android.content.ComponentName;
import android.content.Context;
import android.database.sqlite.SQLiteDatabase;
import android.health.connect.HealthConnectDataState;
import android.health.connect.HealthConnectException;
import android.health.connect.HealthConnectManager.DataDownloadState;
import android.health.connect.aidl.IDataStagingFinishedCallback;
import android.health.connect.restore.BackupFileNamesSet;
import android.health.connect.restore.StageRemoteDataException;
import android.health.connect.restore.StageRemoteDataRequest;
import android.os.Binder;
import android.os.ParcelFileDescriptor;
import android.os.PersistableBundle;
import android.os.RemoteException;
import android.os.UserHandle;
import android.text.format.DateUtils;
import android.util.ArrayMap;
import android.util.ArraySet;
import android.util.Log;
import android.util.Slog;

import com.android.internal.annotations.VisibleForTesting;
import com.android.server.healthconnect.HealthConnectThreadScheduler;
import com.android.server.healthconnect.exportimport.DatabaseContext;
import com.android.server.healthconnect.exportimport.DatabaseMerger;
import com.android.server.healthconnect.migration.MigrationStateManager;
import com.android.server.healthconnect.permission.FirstGrantTimeManager;
import com.android.server.healthconnect.permission.GrantTimeXmlHelper;
import com.android.server.healthconnect.permission.UserGrantTimeState;
import com.android.server.healthconnect.storage.HealthConnectDatabase;
import com.android.server.healthconnect.storage.TransactionManager;
import com.android.server.healthconnect.storage.datatypehelpers.PreferenceHelper;
import com.android.server.healthconnect.utils.FilesUtil;
import com.android.server.healthconnect.utils.RunnableWithThrowable;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.time.Instant;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Class responsible for performing backup / restore related tasks.
 *
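 * <p>Illustrative restore flow (a sketch only; the individual methods below are authoritative):
 * the caller reports the download state, stages the downloaded files, and this class then merges
 * the staged data into the live database.
 *
 * <pre>{@code
 * backupRestore.updateDataDownloadState(DATA_DOWNLOAD_STARTED);
 * backupRestore.updateDataDownloadState(DATA_DOWNLOAD_COMPLETE);
 * if (backupRestore.prepForStagingIfNotAlreadyDone()) {
 *     backupRestore.stageAllHealthConnectRemoteData(
 *             pfdsByFileName, exceptionsByFileName, userHandle, callback);
 * }
 * }</pre>
 *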
 * @hide
 */
public final class BackupRestore {
    // Key for storing the current data download state
    @VisibleForTesting
    public static final String DATA_DOWNLOAD_STATE_KEY = "data_download_state_key";

    // The below values for the IntDef are defined in chronological order of the restore process.
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_UNKNOWN = 0;
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING = 1;
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS = 2;
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_STAGING_DONE = 3;
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS = 4;
    // See b/290172311 for details.
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_MERGING_DONE_OLD_CODE = 5;

    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_MERGING_DONE = 6;

    @VisibleForTesting
    static final long DATA_DOWNLOAD_TIMEOUT_INTERVAL_MILLIS = 14 * DateUtils.DAY_IN_MILLIS;

    @VisibleForTesting
    static final long DATA_STAGING_TIMEOUT_INTERVAL_MILLIS = DateUtils.DAY_IN_MILLIS;

    @VisibleForTesting
    static final long DATA_MERGING_TIMEOUT_INTERVAL_MILLIS = 5 * DateUtils.DAY_IN_MILLIS;

    @VisibleForTesting
    static final long DATA_MERGING_RETRY_DELAY_MILLIS = 12 * DateUtils.HOUR_IN_MILLIS;

    // Used in #setOverrideDeadline to set a minimum window of 24 hours. See b/311402873,
    // b/319721118
    @VisibleForTesting
    static final long MINIMUM_LATENCY_WINDOW_MILLIS = 24 * DateUtils.HOUR_IN_MILLIS;

    @VisibleForTesting static final String DATA_DOWNLOAD_TIMEOUT_KEY = "data_download_timeout_key";

    @VisibleForTesting static final String DATA_STAGING_TIMEOUT_KEY = "data_staging_timeout_key";
    @VisibleForTesting static final String DATA_MERGING_TIMEOUT_KEY = "data_merging_timeout_key";

    @VisibleForTesting
    static final String DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY = "data_download_timeout_cancelled_key";

    @VisibleForTesting
    static final String DATA_STAGING_TIMEOUT_CANCELLED_KEY = "data_staging_timeout_cancelled_key";

    @VisibleForTesting
    static final String DATA_MERGING_TIMEOUT_CANCELLED_KEY = "data_merging_timeout_cancelled_key";

    @VisibleForTesting static final String DATA_MERGING_RETRY_KEY = "data_merging_retry_key";
    private static final String DATA_MERGING_RETRY_CANCELLED_KEY =
            "data_merging_retry_cancelled_key";

    @Retention(RetentionPolicy.SOURCE)
    @IntDef({
        INTERNAL_RESTORE_STATE_UNKNOWN,
        INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING,
        INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS,
        INTERNAL_RESTORE_STATE_STAGING_DONE,
        INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS,
        INTERNAL_RESTORE_STATE_MERGING_DONE_OLD_CODE,
        INTERNAL_RESTORE_STATE_MERGING_DONE
    })
    public @interface InternalRestoreState {}

    // Key for storing the current data restore state on disk.
    public static final String DATA_RESTORE_STATE_KEY = "data_restore_state_key";
    // Key for storing the error restoring HC data.
    public static final String DATA_RESTORE_ERROR_KEY = "data_restore_error_key";

    @VisibleForTesting
    static final String GRANT_TIME_FILE_NAME = "health-permissions-first-grant-times.xml";

    @VisibleForTesting static final String STAGED_DATABASE_DIR = "remote_staged";

    @VisibleForTesting static final String STAGED_DATABASE_NAME = "healthconnect_staged.db";

    private static final String TAG = "HealthConnectBackupRestore";
    private final ReentrantReadWriteLock mStatesLock = new ReentrantReadWriteLock(true);
    private final FirstGrantTimeManager mFirstGrantTimeManager;
    private final MigrationStateManager mMigrationStateManager;

    private final Context mContext;
    private final Object mMergingLock = new Object();

    private final DatabaseMerger mDatabaseMerger;

    private boolean mActivelyStagingRemoteData = false;

    private volatile UserHandle mCurrentForegroundUser;

    @SuppressWarnings("NullAway.Init") // TODO(b/317029272): fix this suppression
    public BackupRestore(
            FirstGrantTimeManager firstGrantTimeManager,
            MigrationStateManager migrationStateManager,
            @NonNull Context context) {
        mFirstGrantTimeManager = firstGrantTimeManager;
        mMigrationStateManager = migrationStateManager;
        mContext = context;
        mCurrentForegroundUser = mContext.getUser();
        mDatabaseMerger = new DatabaseMerger(context);
    }

    public void setupForUser(UserHandle currentForegroundUser) {
        Slog.d(TAG, "Performing user switch operations.");
        mCurrentForegroundUser = currentForegroundUser;
        HealthConnectThreadScheduler.scheduleInternalTask(this::scheduleAllJobs);
    }

    /**
     * Prepares for staging all health connect remote data.
     *
     * @return true if the preparation was successful; false if staging is already in progress or
     *     already done.
     */
    public boolean prepForStagingIfNotAlreadyDone() {
        mStatesLock.writeLock().lock();
        try {
            Slog.d(TAG, "Prepping for staging.");
            setDataDownloadState(DATA_DOWNLOAD_COMPLETE, false /* force */);
            @InternalRestoreState int curDataRestoreState = getInternalRestoreState();
            if (curDataRestoreState >= INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
                if (curDataRestoreState >= INTERNAL_RESTORE_STATE_STAGING_DONE) {
                    Slog.w(TAG, "Staging is already done. Cur state " + curDataRestoreState);
                } else {
                    // Maybe the caller died and is trying to stage the data again.
                    Slog.w(TAG, "Already in the process of staging.");
                }
                return false;
            }
            mActivelyStagingRemoteData = true;
            setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS, false /* force */);
            return true;
        } finally {
            mStatesLock.writeLock().unlock();
        }
    }

    /**
     * Stages all health connect remote data for merging later.
     *
     * <p>This should be called on the proper thread.
     */
    public void stageAllHealthConnectRemoteData(
            Map<String, ParcelFileDescriptor> pfdsByFileName,
            Map<String, HealthConnectException> exceptionsByFileName,
            @NonNull UserHandle userHandle,
            @NonNull IDataStagingFinishedCallback callback) {
        DatabaseContext dbContext =
                DatabaseContext.create(mContext, STAGED_DATABASE_DIR, userHandle);
        File stagedRemoteDataDir = dbContext.getDatabaseDir();
        try {
            stagedRemoteDataDir.mkdirs();

            // Now that we have the dir we can try to copy all the data.
            // Any exceptions we face will be collected and shared with the caller.
            pfdsByFileName.forEach(
                    (fileName, pfd) -> {
                        File destination = new File(stagedRemoteDataDir, fileName);
                        try (FileInputStream inputStream =
                                new FileInputStream(pfd.getFileDescriptor())) {
                            Path destinationPath =
                                    FileSystems.getDefault().getPath(destination.getAbsolutePath());
                            Files.copy(
                                    inputStream,
                                    destinationPath,
                                    StandardCopyOption.REPLACE_EXISTING);
                        } catch (IOException e) {
                            Slog.e(
                                    TAG,
                                    "Failed to copy to destination: " + destination.getName(),
                                    e);
                            destination.delete();
                            exceptionsByFileName.put(
                                    fileName,
                                    new HealthConnectException(
                                            HealthConnectException.ERROR_IO, e.getMessage()));
                        } catch (SecurityException e) {
                            Slog.e(
                                    TAG,
                                    "Failed to copy to destination: " + destination.getName(),
                                    e);
                            destination.delete();
                            exceptionsByFileName.put(
                                    fileName,
                                    new HealthConnectException(
                                            HealthConnectException.ERROR_SECURITY, e.getMessage()));
                        } finally {
                            try {
                                pfd.close();
                            } catch (IOException e) {
                                exceptionsByFileName.put(
                                        fileName,
                                        new HealthConnectException(
                                                HealthConnectException.ERROR_IO, e.getMessage()));
                            }
                        }
                    });
        } finally {
            // We are done staging all the remote data, so update the data restore state.
            // Even if we encountered exceptions we still say that we are "done" as
            // we don't expect the caller to retry and see different results.
            setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_DONE, false);
            mActivelyStagingRemoteData = false;

            // Share the result / exception with the caller.
            try {
                if (exceptionsByFileName.isEmpty()) {
                    callback.onResult();
                } else {
                    Slog.i(TAG, "Exceptions encountered during staging.");
                    setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
                    callback.onError(new StageRemoteDataException(exceptionsByFileName));
                }
            } catch (RemoteException e) {
                Log.e(TAG, "Restore response could not be sent to the caller.", e);
            } catch (SecurityException e) {
                Log.e(
                        TAG,
                        "Restore response could not be sent due to conflicting AIDL definitions",
                        e);
            } finally {
                // Now that the callback for the stageAllHealthConnectRemoteData API has been called
                // we can start the merging process.
                merge();
            }
        }
    }
    /** Writes the backup data into files represented by the passed file descriptors. */
    public void getAllDataForBackup(
            @NonNull StageRemoteDataRequest stageRemoteDataRequest,
            @NonNull UserHandle userHandle) {
        Slog.d(TAG, "Incoming request to get all data for backup");
        Map<String, ParcelFileDescriptor> pfdsByFileName =
                stageRemoteDataRequest.getPfdsByFileName();

        var backupFilesByFileNames = getBackupFilesByFileNames(userHandle);
        pfdsByFileName.forEach(
                (fileName, pfd) -> {
                    @SuppressWarnings("NullAway") // TODO(b/317029272): fix this suppression
                    Path sourceFilePath = backupFilesByFileNames.get(fileName).toPath();
                    try (FileOutputStream outputStream =
                            new FileOutputStream(pfd.getFileDescriptor())) {
                        Files.copy(sourceFilePath, outputStream);
                    } catch (IOException | SecurityException e) {
                        Slog.e(TAG, "Failed to send " + fileName + " for backup", e);
                    } finally {
                        try {
                            pfd.close();
                        } catch (IOException e) {
                            Slog.e(TAG, "Failed to close " + fileName + " for backup", e);
                        }
                    }
                });
    }

    /** Get the file names of all the files that are transported during backup / restore. */
    public BackupFileNamesSet getAllBackupFileNames(boolean forDeviceToDevice) {
        ArraySet<String> backupFileNames = new ArraySet<>();
        if (forDeviceToDevice) {
            backupFileNames.add(STAGED_DATABASE_NAME);
        }
        backupFileNames.add(GRANT_TIME_FILE_NAME);
        return new BackupFileNamesSet(backupFileNames);
    }
    /** Updates the download state of the remote data. */
    public void updateDataDownloadState(@DataDownloadState int downloadState) {
        setDataDownloadState(downloadState, false /* force */);

        if (downloadState == DATA_DOWNLOAD_COMPLETE) {
            setInternalRestoreState(INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING, false /* force */);
        } else if (downloadState == DATA_DOWNLOAD_FAILED) {
            setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false /* force */);
            setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
        }
    }

    /** Deletes all the staged data and resets all the states. */
    @SuppressWarnings("NullAway") // TODO(b/317029272): fix this suppression
    public void deleteAndResetEverything(@NonNull UserHandle userHandle) {
        DatabaseContext dbContext =
                DatabaseContext.create(mContext, STAGED_DATABASE_DIR, userHandle);

        // Don't delete anything while we are in the process of merging staged data.
        synchronized (mMergingLock) {
            dbContext.deleteDatabase(STAGED_DATABASE_NAME);
            FilesUtil.deleteDir(dbContext.getDatabaseDir());
        }
        setDataDownloadState(DATA_DOWNLOAD_STATE_UNKNOWN, true /* force */);
        setInternalRestoreState(INTERNAL_RESTORE_STATE_UNKNOWN, true /* force */);
        setDataRestoreError(RESTORE_ERROR_NONE);
    }
    /** Returns the current {@link HealthConnectDataState.DataRestoreState}. */
    public @HealthConnectDataState.DataRestoreState int getDataRestoreState() {
        @InternalRestoreState int currentRestoreState = getInternalRestoreState();
        @DataDownloadState int currentDownloadState = getDataDownloadState();

        // Return IDLE if neither the download nor the restore has started yet.
        if (currentRestoreState == INTERNAL_RESTORE_STATE_UNKNOWN
                && currentDownloadState == DATA_DOWNLOAD_STATE_UNKNOWN) {
            return RESTORE_STATE_IDLE;
        }

        // Return IDLE if restore is complete.
        if (currentRestoreState == INTERNAL_RESTORE_STATE_MERGING_DONE) {
            return RESTORE_STATE_IDLE;
        }
        // Return IN_PROGRESS if merging is currently in progress.
        if (currentRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
            return RESTORE_STATE_IN_PROGRESS;
        }

        // In all other cases, return restore pending.
        return RESTORE_STATE_PENDING;
    }

    /** Gets the current data restore error. */
    public @HealthConnectDataState.DataRestoreError int getDataRestoreError() {
        @HealthConnectDataState.DataRestoreError int dataRestoreError = RESTORE_ERROR_NONE;
        String restoreErrorOnDisk =
                PreferenceHelper.getInstance().getPreference(DATA_RESTORE_ERROR_KEY);

        if (restoreErrorOnDisk == null) {
            return dataRestoreError;
        }
        try {
            dataRestoreError = Integer.parseInt(restoreErrorOnDisk);
        } catch (Exception e) {
            Slog.e(TAG, "Exception parsing restoreErrorOnDisk " + restoreErrorOnDisk, e);
        }
        return dataRestoreError;
    }
    /** Returns the file names of all the staged files. */
    @VisibleForTesting
    public Set<String> getStagedRemoteFileNames(@NonNull UserHandle userHandle) {
        DatabaseContext dbContext =
                DatabaseContext.create(mContext, STAGED_DATABASE_DIR, userHandle);
        File[] allFiles = dbContext.getDatabaseDir().listFiles();
        if (allFiles == null) {
            return Collections.emptySet();
        }
        return Stream.of(allFiles)
                .filter(file -> !file.isDirectory())
                .map(File::getName)
                .collect(Collectors.toSet());
    }

    /** Returns true if restore merging is in progress. API calls are blocked when this is true. */
    public boolean isRestoreMergingInProgress() {
        return getInternalRestoreState() == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS;
    }

    /** Schedules any pending jobs. */
    public void scheduleAllJobs() {
        scheduleDownloadStateTimeoutJob();
        scheduleStagingTimeoutJob();
        scheduleMergingTimeoutJob();

        // We can schedule "retry merging" only if we are in the STAGING_DONE state.  However, if we
        // are in STAGING_DONE state, then we should definitely attempt merging now - and that's
        // what we will do below.
        // So, there's no point in scheduling a "retry merging" job.  If Migration is going on then
        // the merge attempt will take care of that automatically (and schedule the retry job as
        // needed).
        triggerMergingIfApplicable();
    }

    /** Cancels all the jobs and sets the cancelled time. */
    public void cancelAllJobs() {
        BackupRestoreJobService.cancelAllJobs(mContext);
        setJobCancelledTimeIfExists(DATA_DOWNLOAD_TIMEOUT_KEY, DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY);
        setJobCancelledTimeIfExists(DATA_STAGING_TIMEOUT_KEY, DATA_STAGING_TIMEOUT_CANCELLED_KEY);
        setJobCancelledTimeIfExists(DATA_MERGING_TIMEOUT_KEY, DATA_MERGING_TIMEOUT_CANCELLED_KEY);
        setJobCancelledTimeIfExists(DATA_MERGING_RETRY_KEY, DATA_MERGING_RETRY_CANCELLED_KEY);
    }

    public UserHandle getCurrentUserHandle() {
        return mCurrentForegroundUser;
    }

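    /**
     * Persists the internal restore state. Unless {@code force} is set, the state can only move
     * forward; entering the staging or merging states also schedules the corresponding timeout
     * job.
     */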
    void setInternalRestoreState(@InternalRestoreState int dataRestoreState, boolean force) {
        @InternalRestoreState int currentRestoreState = getInternalRestoreState();
        mStatesLock.writeLock().lock();
        try {
            if (!force && currentRestoreState >= dataRestoreState) {
                Slog.w(
                        TAG,
                        "Attempt to update data restore state in wrong order from "
                                + currentRestoreState
                                + " to "
                                + dataRestoreState);
                return;
            }
            PreferenceHelper.getInstance()
                    .insertOrReplacePreference(
                            DATA_RESTORE_STATE_KEY, String.valueOf(dataRestoreState));

            if (dataRestoreState == INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
                    || dataRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
                scheduleStagingTimeoutJob();
            } else if (dataRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
                scheduleMergingTimeoutJob();
            }
        } finally {
            mStatesLock.writeLock().unlock();
        }
    }

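    /** Reads the internal restore state from disk, defaulting to UNKNOWN if absent or unparsable. */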
    @InternalRestoreState
    int getInternalRestoreState() {
        mStatesLock.readLock().lock();
        try {
            String restoreStateOnDisk =
                    PreferenceHelper.getInstance().getPreference(DATA_RESTORE_STATE_KEY);
            @InternalRestoreState int currentRestoreState = INTERNAL_RESTORE_STATE_UNKNOWN;
            if (restoreStateOnDisk == null) {
                return currentRestoreState;
            }
            try {
                currentRestoreState = Integer.parseInt(restoreStateOnDisk);
            } catch (Exception e) {
                Slog.e(TAG, "Exception parsing restoreStateOnDisk: " + restoreStateOnDisk, e);
            }
            // If we are not actively staging the data right now but the disk still reflects that we
            // are, then that means we died in the middle of staging. We should be waiting for the
            // remote data to be staged now.
            if (!mActivelyStagingRemoteData
                    && currentRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
                currentRestoreState = INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING;
            }
            return currentRestoreState;
        } finally {
            mStatesLock.readLock().unlock();
        }
    }

    /** Returns true if this job needs rescheduling; false otherwise. */
    @VisibleForTesting
    boolean handleJob(PersistableBundle extras) {
        String jobName = extras.getString(EXTRA_JOB_NAME_KEY);
        switch (jobName) {
            case DATA_DOWNLOAD_TIMEOUT_KEY -> executeDownloadStateTimeoutJob();
            case DATA_STAGING_TIMEOUT_KEY -> executeStagingTimeoutJob();
            case DATA_MERGING_TIMEOUT_KEY -> executeMergingTimeoutJob();
            case DATA_MERGING_RETRY_KEY -> executeRetryMergingJob();
            default -> Slog.w(TAG, "Unknown job " + jobName + " delivered.");
        }
        // None of the jobs want to reschedule.
        return false;
    }

    @VisibleForTesting
    boolean shouldAttemptMerging() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_DONE
                || internalRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS
                || internalRestoreState == INTERNAL_RESTORE_STATE_MERGING_DONE_OLD_CODE) {
            Slog.i(TAG, "Should attempt merging now with state = " + internalRestoreState);
            return true;
        }
        return false;
    }

    @VisibleForTesting
    void merge() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState >= INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
            Slog.i(TAG, "Not merging as internalRestoreState is " + internalRestoreState);
            return;
        }

        if (mMigrationStateManager.isMigrationInProgress()) {
            Slog.i(TAG, "Not merging as Migration in progress.");
            scheduleRetryMergingJob();
            return;
        }

        int currentDbVersion = TransactionManager.getInitialisedInstance().getDatabaseVersion();
        DatabaseContext dbContext =
                DatabaseContext.create(mContext, STAGED_DATABASE_DIR, mCurrentForegroundUser);
        File stagedDbFile = dbContext.getDatabasePath(STAGED_DATABASE_NAME);
        if (stagedDbFile.exists()) {
            try (SQLiteDatabase stagedDb =
                    SQLiteDatabase.openDatabase(
                            stagedDbFile, new SQLiteDatabase.OpenParams.Builder().build())) {
                int stagedDbVersion = stagedDb.getVersion();
                Slog.i(
                        TAG,
                        "merging staged data, current version = "
                                + currentDbVersion
                                + ", staged version = "
                                + stagedDbVersion);
                if (currentDbVersion < stagedDbVersion) {
                    Slog.i(TAG, "Module needs upgrade for merging to version " + stagedDbVersion);
                    setDataRestoreError(RESTORE_ERROR_VERSION_DIFF);
                    return;
                }
            }
        } else {
            Slog.i(TAG, "No database file found to merge.");
        }

        Slog.i(TAG, "Starting the data merge.");
        setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS, false);
        mergeGrantTimes(dbContext);
        mergeDatabase(dbContext);
        setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);

        // Reset the error in case it was due to version diff.
        // TODO(b/327170886): Should we always set it to NONE once merging is done?
        if (getDataRestoreError() == RESTORE_ERROR_VERSION_DIFF) {
            setDataRestoreError(RESTORE_ERROR_NONE);
        }
    }

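    // Returns the files to back up: the current database plus a freshly serialized grant-time
    // file for the given user.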
    private Map<String, File> getBackupFilesByFileNames(UserHandle userHandle) {
        ArrayMap<String, File> backupFilesByFileNames = new ArrayMap<>();

        File databasePath = TransactionManager.getInitialisedInstance().getDatabasePath();
        backupFilesByFileNames.put(STAGED_DATABASE_NAME, databasePath);

        File backupDataDir = getBackupDataDirectoryForUser(userHandle.getIdentifier());
        backupDataDir.mkdirs();
        File grantTimeFile = new File(backupDataDir, GRANT_TIME_FILE_NAME);
        try {
            grantTimeFile.createNewFile();
            GrantTimeXmlHelper.serializeGrantTimes(
                    grantTimeFile, mFirstGrantTimeManager.getGrantTimeStateForUser(userHandle));
            backupFilesByFileNames.put(grantTimeFile.getName(), grantTimeFile);
        } catch (IOException e) {
            Slog.e(TAG, "Could not create the grant time file for backup.", e);
        }

        return backupFilesByFileNames;
    }

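    /** Reads the data download state from disk, defaulting to UNKNOWN if absent or unparsable. */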
    @DataDownloadState
    private int getDataDownloadState() {
        mStatesLock.readLock().lock();
        try {
            String downloadStateOnDisk =
                    PreferenceHelper.getInstance().getPreference(DATA_DOWNLOAD_STATE_KEY);
            @DataDownloadState int currentDownloadState = DATA_DOWNLOAD_STATE_UNKNOWN;
            if (downloadStateOnDisk == null) {
                return currentDownloadState;
            }
            try {
                currentDownloadState = Integer.parseInt(downloadStateOnDisk);
            } catch (Exception e) {
                Slog.e(TAG, "Exception parsing downloadStateOnDisk " + downloadStateOnDisk, e);
            }
            return currentDownloadState;
        } finally {
            mStatesLock.readLock().unlock();
        }
    }

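    /**
     * Persists the data download state. Unless {@code force} is set, a terminal state (FAILED or
     * COMPLETE) is never overwritten. Starting or retrying a download also records the start time
     * and schedules the download timeout job.
     */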
    private void setDataDownloadState(@DataDownloadState int downloadState, boolean force) {
        mStatesLock.writeLock().lock();
        try {
            @DataDownloadState int currentDownloadState = getDataDownloadState();
            if (!force
                    && (currentDownloadState == DATA_DOWNLOAD_FAILED
                            || currentDownloadState == DATA_DOWNLOAD_COMPLETE)) {
                Slog.w(TAG, "HC data download already in terminal state.");
                return;
            }
            PreferenceHelper.getInstance()
                    .insertOrReplacePreference(
                            DATA_DOWNLOAD_STATE_KEY, String.valueOf(downloadState));

            if (downloadState == DATA_DOWNLOAD_STARTED || downloadState == DATA_DOWNLOAD_RETRY) {
                PreferenceHelper.getInstance()
                        .insertOrReplacePreference(
                                DATA_DOWNLOAD_TIMEOUT_KEY,
                                Long.toString(Instant.now().toEpochMilli()));
                scheduleDownloadStateTimeoutJob();
            }
        } finally {
            mStatesLock.writeLock().unlock();
        }
    }

    // A separate single-line method so this code stays close to the rest of the code that uses
    // PreferenceHelper to persist data on disk.
    private void setDataRestoreError(
            @HealthConnectDataState.DataRestoreError int dataRestoreError) {
        PreferenceHelper.getInstance()
                .insertOrReplacePreference(
                        DATA_RESTORE_ERROR_KEY, String.valueOf(dataRestoreError));
    }

    /** Schedule timeout for data download state so that we are not stuck in the current state. */
    private void scheduleDownloadStateTimeoutJob() {
        @DataDownloadState int currentDownloadState = getDataDownloadState();
        if (currentDownloadState != DATA_DOWNLOAD_STARTED
                && currentDownloadState != DATA_DOWNLOAD_RETRY) {
            Slog.i(
                    TAG,
                    "Attempt to schedule download timeout job with state: " + currentDownloadState);
            // We are not in the correct state. There's no need to set the timer.
            return;
        }

        // We might be here because the device rebooted or the user switched. If a timer was already
        // going on then we want to continue that timer.
        long timeoutMillis =
                getRemainingTimeoutMillis(
                        DATA_DOWNLOAD_TIMEOUT_KEY,
                        DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY,
                        DATA_DOWNLOAD_TIMEOUT_INTERVAL_MILLIS);

        int userId = mCurrentForegroundUser.getIdentifier();
        final PersistableBundle extras = new PersistableBundle();
        extras.putInt(EXTRA_USER_ID, userId);
        extras.putString(EXTRA_JOB_NAME_KEY, DATA_DOWNLOAD_TIMEOUT_KEY);
        JobInfo.Builder jobInfoBuilder =
                new JobInfo.Builder(
                                BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
                                new ComponentName(mContext, BackupRestoreJobService.class))
                        .setExtras(extras)
                        .setMinimumLatency(timeoutMillis)
                        .setOverrideDeadline(timeoutMillis + MINIMUM_LATENCY_WINDOW_MILLIS);
        Slog.i(
                TAG,
                "Scheduling download state timeout job with period: " + timeoutMillis + " millis");
        BackupRestoreJobService.schedule(mContext, jobInfoBuilder.build(), this);

        // Set the start time
        PreferenceHelper.getInstance()
                .insertOrReplacePreference(
                        DATA_DOWNLOAD_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
    }

    private void executeDownloadStateTimeoutJob() {
        @DataDownloadState int currentDownloadState = getDataDownloadState();
        if (currentDownloadState == DATA_DOWNLOAD_STARTED
                || currentDownloadState == DATA_DOWNLOAD_RETRY) {
            Slog.i(TAG, "Executing download state timeout job");
            setDataDownloadState(DATA_DOWNLOAD_FAILED, false);
            setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
            // Remove the remaining timeouts from the disk
            PreferenceHelper.getInstance().insertOrReplacePreference(DATA_DOWNLOAD_TIMEOUT_KEY, "");
            PreferenceHelper.getInstance()
                    .insertOrReplacePreference(DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY, "");
        } else {
            Slog.i(TAG, "Download state timeout job fired in state: " + currentDownloadState);
        }
    }

    /** Schedule timeout for data staging state so that we are not stuck in the current state. */
    private void scheduleStagingTimeoutJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState != INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
                && internalRestoreState != INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
            // We are not in the correct state. There's no need to set the timer.
            Slog.i(
                    TAG,
                    "Attempt to schedule staging timeout job with state: " + internalRestoreState);
            return;
        }

        // We might be here because the device rebooted or the user switched. If a timer was already
        // going on then we want to continue that timer.
        long timeoutMillis =
                getRemainingTimeoutMillis(
                        DATA_STAGING_TIMEOUT_KEY,
                        DATA_STAGING_TIMEOUT_CANCELLED_KEY,
                        DATA_STAGING_TIMEOUT_INTERVAL_MILLIS);

        int userId = mCurrentForegroundUser.getIdentifier();
        final PersistableBundle extras = new PersistableBundle();
        extras.putInt(EXTRA_USER_ID, userId);
        extras.putString(EXTRA_JOB_NAME_KEY, DATA_STAGING_TIMEOUT_KEY);
        JobInfo.Builder jobInfoBuilder =
                new JobInfo.Builder(
                                BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
                                new ComponentName(mContext, BackupRestoreJobService.class))
                        .setExtras(extras)
                        .setMinimumLatency(timeoutMillis)
                        .setOverrideDeadline(timeoutMillis + MINIMUM_LATENCY_WINDOW_MILLIS);
        Slog.i(TAG, "Scheduling staging timeout job with period: " + timeoutMillis + " millis");
        BackupRestoreJobService.schedule(mContext, jobInfoBuilder.build(), this);

        // Set the start time
        PreferenceHelper.getInstance()
                .insertOrReplacePreference(
                        DATA_STAGING_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
    }

    private void executeStagingTimeoutJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState == INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
                || internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
            Slog.i(TAG, "Executing staging timeout job");
            setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);
            setDataRestoreError(RESTORE_ERROR_UNKNOWN);
            // Remove the remaining timeouts from the disk
            PreferenceHelper.getInstance().insertOrReplacePreference(DATA_STAGING_TIMEOUT_KEY, "");
            PreferenceHelper.getInstance()
                    .insertOrReplacePreference(DATA_STAGING_TIMEOUT_CANCELLED_KEY, "");
        } else {
            Slog.i(TAG, "Staging timeout job fired in state: " + internalRestoreState);
        }
    }

    /** Schedule timeout for data merging state so that we are not stuck in the current state. */
    private void scheduleMergingTimeoutJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState != INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
            // We are not in the correct state. There's no need to set the timer.
            Slog.i(
                    TAG,
                    "Attempt to schedule merging timeout job with state: " + internalRestoreState);
            return;
        }

        // We might be here because the device rebooted or the user switched. If a timer was already
        // going on then we want to continue that timer.
        long timeoutMillis =
                getRemainingTimeoutMillis(
                        DATA_MERGING_TIMEOUT_KEY,
                        DATA_MERGING_TIMEOUT_CANCELLED_KEY,
                        DATA_MERGING_TIMEOUT_INTERVAL_MILLIS);

        int userId = mCurrentForegroundUser.getIdentifier();
        final PersistableBundle extras = new PersistableBundle();
        extras.putInt(EXTRA_USER_ID, userId);
        extras.putString(EXTRA_JOB_NAME_KEY, DATA_MERGING_TIMEOUT_KEY);
        JobInfo.Builder jobInfoBuilder =
                new JobInfo.Builder(
                                BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
                                new ComponentName(mContext, BackupRestoreJobService.class))
                        .setExtras(extras)
                        .setMinimumLatency(timeoutMillis)
                        .setOverrideDeadline(timeoutMillis + MINIMUM_LATENCY_WINDOW_MILLIS);
        Slog.i(TAG, "Scheduling merging timeout job with period: " + timeoutMillis + " millis");
        BackupRestoreJobService.schedule(mContext, jobInfoBuilder.build(), this);

        // Set the start time
        PreferenceHelper.getInstance()
                .insertOrReplacePreference(
                        DATA_MERGING_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
    }

    private void executeMergingTimeoutJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
            Slog.i(TAG, "Executing merging timeout job");
            setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);
            setDataRestoreError(RESTORE_ERROR_UNKNOWN);
            // Remove the remaining timeouts from the disk
            PreferenceHelper.getInstance().insertOrReplacePreference(DATA_MERGING_TIMEOUT_KEY, "");
            PreferenceHelper.getInstance()
                    .insertOrReplacePreference(DATA_MERGING_TIMEOUT_CANCELLED_KEY, "");
        } else {
            Slog.i(TAG, "Merging timeout job fired in state: " + internalRestoreState);
        }
    }

    private void scheduleRetryMergingJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState != INTERNAL_RESTORE_STATE_STAGING_DONE) {
            // We can do merging only if we are in the STAGING_DONE state.
            Slog.i(
                    TAG,
                    "Attempt to schedule merging retry job with state: " + internalRestoreState);
            return;
        }

        int userId = mCurrentForegroundUser.getIdentifier();
        final PersistableBundle extras = new PersistableBundle();
        extras.putInt(EXTRA_USER_ID, userId);
        extras.putString(EXTRA_JOB_NAME_KEY, DATA_MERGING_RETRY_KEY);

        // We might be here because the device rebooted or the user switched. If a timer was already
        // going on then we want to continue that timer.
        long timeoutMillis =
                getRemainingTimeoutMillis(
                        DATA_MERGING_RETRY_KEY,
                        DATA_MERGING_RETRY_CANCELLED_KEY,
                        DATA_MERGING_RETRY_DELAY_MILLIS);
        JobInfo.Builder jobInfoBuilder =
                new JobInfo.Builder(
                                BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
                                new ComponentName(mContext, BackupRestoreJobService.class))
                        .setExtras(extras)
                        .setMinimumLatency(timeoutMillis)
                        .setOverrideDeadline(timeoutMillis + MINIMUM_LATENCY_WINDOW_MILLIS);
        Slog.i(TAG, "Scheduling retry merging job with period: " + timeoutMillis + " millis");
        BackupRestoreJobService.schedule(mContext, jobInfoBuilder.build(), this);

        // Set the start time
        PreferenceHelper.getInstance()
                .insertOrReplacePreference(
                        DATA_MERGING_RETRY_KEY, Long.toString(Instant.now().toEpochMilli()));
    }

    private void executeRetryMergingJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_DONE) {
            Slog.i(TAG, "Retrying merging");
            merge();

            if (getInternalRestoreState() == INTERNAL_RESTORE_STATE_MERGING_DONE) {
                // Remove the remaining timeouts from the disk
                PreferenceHelper.getInstance()
                        .insertOrReplacePreference(DATA_MERGING_RETRY_KEY, "");
                PreferenceHelper.getInstance()
                        .insertOrReplacePreference(DATA_MERGING_RETRY_CANCELLED_KEY, "");
            }
        } else {
            Slog.i(TAG, "Merging retry job fired in state: " + internalRestoreState);
        }
    }

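    // Kicks off merging on an internal background task if the staged data is ready to be merged.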
    private void triggerMergingIfApplicable() {
        HealthConnectThreadScheduler.scheduleInternalTask(
                () -> {
                    if (shouldAttemptMerging()) {
                        Slog.i(TAG, "Attempting merging.");
                        setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_DONE, true);
                        merge();
                    }
                });
    }

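    // Returns how much of the standard timeout remains, accounting for time already spent before
    // the job was cancelled (e.g. across a reboot or user switch).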
    private long getRemainingTimeoutMillis(
            String startTimeKey, String cancelledTimeKey, long stdTimeout) {
        String startTimeStr = PreferenceHelper.getInstance().getPreference(startTimeKey);
        if (startTimeStr == null || startTimeStr.trim().isEmpty()) {
            return stdTimeout;
        }
        long currTime = Instant.now().toEpochMilli();
        String cancelledTimeStr = PreferenceHelper.getInstance().getPreference(cancelledTimeKey);
        if (cancelledTimeStr == null || cancelledTimeStr.trim().isEmpty()) {
            return Math.max(0, stdTimeout - (currTime - Long.parseLong(startTimeStr)));
        }
        long spentTime = Long.parseLong(cancelledTimeStr) - Long.parseLong(startTimeStr);
        return Math.max(0, stdTimeout - spentTime);
    }

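    // Records the cancellation time for a job, but only if a start time for it exists on disk.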
    private void setJobCancelledTimeIfExists(String startTimeKey, String cancelTimeKey) {
        if (PreferenceHelper.getInstance().getPreference(startTimeKey) != null) {
            PreferenceHelper.getInstance()
                    .insertOrReplacePreference(
                            cancelTimeKey, Long.toString(Instant.now().toEpochMilli()));
        }
    }

    private static File getBackupDataDirectoryForUser(int userId) {
        return getNamedHcDirectoryForUser("backup", userId);
    }

    private static File getNamedHcDirectoryForUser(String dirName, int userId) {
        File hcDirectoryForUser = FilesUtil.getDataSystemCeHCDirectoryForUser(userId);
        return new File(hcDirectoryForUser, dirName);
    }

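    // Applies the restored permission first-grant times for the current foreground user.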
    private void mergeGrantTimes(DatabaseContext dbContext) {
        Slog.i(TAG, "Merging grant times.");
        File restoredGrantTimeFile = new File(dbContext.getDatabaseDir(), GRANT_TIME_FILE_NAME);
        UserGrantTimeState userGrantTimeState =
                GrantTimeXmlHelper.parseGrantTime(restoredGrantTimeFile);
        mFirstGrantTimeManager.applyAndStageGrantTimeStateForUser(
                mCurrentForegroundUser, userGrantTimeState);
    }

    @SuppressWarnings("NullAway") // TODO(b/317029272): fix this suppression
    private void mergeDatabase(DatabaseContext dbContext) {
        synchronized (mMergingLock) {
            if (!dbContext.getDatabasePath(STAGED_DATABASE_NAME).exists()) {
                Slog.i(TAG, "No staged db found.");
                // no db was staged
                return;
            }

            mDatabaseMerger.merge(new HealthConnectDatabase(dbContext, STAGED_DATABASE_NAME));

            // Delete the staged db as we are done merging.
            Slog.i(TAG, "Deleting staged db after merging.");
            dbContext.deleteDatabase(STAGED_DATABASE_NAME);
        }
    }

    /** Executes the task as a critical section while holding the read lock. */
    public <E extends Throwable> void runWithStatesReadLock(RunnableWithThrowable<E> task)
            throws E {
        mStatesLock.readLock().lock();
        try {
            task.run();
        } finally {
            mStatesLock.readLock().unlock();
        }
    }

    /** Schedules the jobs for {@link BackupRestore}. */
    public static final class BackupRestoreJobService extends JobService {
        public static final String BACKUP_RESTORE_JOBS_NAMESPACE = "BACKUP_RESTORE_JOBS_NAMESPACE";
        public static final String EXTRA_USER_ID = "user_id";
        public static final String EXTRA_JOB_NAME_KEY = "job_name";
        private static final int BACKUP_RESTORE_JOB_ID = 1000;

        @SuppressWarnings("NullAway.Init") // TODO(b/317029272): fix this suppression
        static volatile BackupRestore sBackupRestore;

        @Override
        public boolean onStartJob(JobParameters params) {
            int userId = params.getExtras().getInt(EXTRA_USER_ID, DEFAULT_INT);
            if (userId != sBackupRestore.getCurrentUserHandle().getIdentifier()) {
                Slog.w(
                        TAG,
                        "Got onStartJob for non active user: "
                                + userId
                                + ", but the current active user is: "
                                + sBackupRestore.getCurrentUserHandle().getIdentifier());
                return false;
            }

            String jobName = params.getExtras().getString(EXTRA_JOB_NAME_KEY);
            if (Objects.isNull(jobName)) {
                Slog.w(TAG, "Got onStartJob for a nameless job");
                return false;
            }

            HealthConnectThreadScheduler.scheduleInternalTask(
                    () -> jobFinished(params, sBackupRestore.handleJob(params.getExtras())));

            return true;
        }

        @Override
        public boolean onStopJob(JobParameters params) {
            return false;
        }

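        // Schedules the given job in the backup/restore namespace, clearing the calling identity
        // so scheduling is not attributed to the binder caller.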
        static void schedule(
                Context context, @NonNull JobInfo jobInfo, BackupRestore backupRestore) {
            sBackupRestore = backupRestore;
            final long token = Binder.clearCallingIdentity();
            try {
                int result =
                        requireNonNull(context.getSystemService(JobScheduler.class))
                                .forNamespace(BACKUP_RESTORE_JOBS_NAMESPACE)
                                .schedule(jobInfo);

                if (result != JobScheduler.RESULT_SUCCESS) {
                    Slog.e(
                            TAG,
                            "Failed to schedule: "
                                    + jobInfo.getExtras().getString(EXTRA_JOB_NAME_KEY));
                }
            } finally {
                Binder.restoreCallingIdentity(token);
            }
        }

        /** Cancels all jobs for our namespace. */
        public static void cancelAllJobs(Context context) {
            requireNonNull(context.getSystemService(JobScheduler.class))
                    .forNamespace(BACKUP_RESTORE_JOBS_NAMESPACE)
                    .cancelAll();
        }
    }
}