[ 
https://issues.apache.org/jira/browse/GEODE-3799?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16295601#comment-16295601
 ] 

ASF GitHub Bot commented on GEODE-3799:
---------------------------------------

nreich closed pull request #1177: Revert "GEODE-3799: Move backups towards a 
pluggable architecture (#1…
URL: https://github.com/apache/geode/pull/1177
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java 
b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
index e3ea1fb39c..e3701e0a1c 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
@@ -28,8 +28,6 @@
 import java.nio.channels.ClosedByInterruptException;
 import java.nio.channels.FileChannel;
 import java.nio.channels.FileLock;
-import java.nio.file.Files;
-import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -38,7 +36,6 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
@@ -61,11 +58,9 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import java.util.stream.Collectors;
 
 import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
 import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
-import org.apache.commons.io.FileUtils;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.CancelCriterion;
@@ -1962,8 +1957,6 @@ private void loadFiles(boolean needsOplogs) {
         deleteFiles(overflowFileFilter);
       }
 
-      cleanupOrphanedBackupDirectories();
-
       persistentOplogs.createOplogs(needsOplogs, persistentBackupFiles);
       finished = true;
 
@@ -1989,28 +1982,6 @@ private void loadFiles(boolean needsOplogs) {
     }
   }
 
-  private void cleanupOrphanedBackupDirectories() {
-    for (DirectoryHolder directoryHolder : getDirectoryHolders()) {
-      try {
-        List<Path> backupDirectories = 
Files.list(directoryHolder.getDir().toPath())
-            .filter((path) -> path.getFileName().toString()
-                .startsWith(BackupManager.DATA_STORES_TEMPORARY_DIRECTORY))
-            .filter(p -> Files.isDirectory(p)).collect(Collectors.toList());
-        for (Path backupDirectory : backupDirectories) {
-          try {
-            logger.info("Deleting orphaned backup temporary directory: " + 
backupDirectory);
-            FileUtils.deleteDirectory(backupDirectory.toFile());
-          } catch (IOException e) {
-            logger.warn("Failed to remove orphaned backup temporary directory: 
" + backupDirectory,
-                e);
-          }
-        }
-      } catch (IOException e) {
-        logger.warn(e);
-      }
-    }
-  }
-
   /**
    * The diskStats are at PR level.Hence if the region is a bucket region, the 
stats should not be
    * closed, but the figures of entriesInVM and overflowToDisk contributed by 
that bucket need to be
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDefinition.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDefinition.java
deleted file mode 100644
index 685d03b474..0000000000
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDefinition.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
- * or implied. See the License for the specific language governing permissions 
and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.backup;
-
-import java.nio.file.Path;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.geode.cache.DiskStore;
-
-class BackupDefinition {
-  private final Map<DiskStore, Set<Path>> oplogFilesByDiskStore = new 
HashMap<>();
-  private final Set<Path> configFiles = new HashSet<>();
-  private final Set<Path> userFiles = new HashSet<>();
-  private final Set<Path> deployedJars = new HashSet<>();
-  private final Map<DiskStore, Path> diskInitFiles = new HashMap<>();
-  private RestoreScript restoreScript;
-
-  void addConfigFileToBackup(Path configFile) {
-    configFiles.add(configFile);
-  }
-
-  void addUserFilesToBackup(Path userFile) {
-    userFiles.add(userFile);
-  }
-
-  void addDeployedJarToBackup(Path deployedJar) {
-    deployedJars.add(deployedJar);
-  }
-
-  void addDiskInitFile(DiskStore diskStore, Path diskInitFile) {
-    diskInitFiles.put(diskStore, diskInitFile);
-  }
-
-  void setRestoreScript(RestoreScript restoreScript) {
-    this.restoreScript = restoreScript;
-  }
-
-  Map<DiskStore, Collection<Path>> getOplogFilesByDiskStore() {
-    return Collections.unmodifiableMap(oplogFilesByDiskStore);
-  }
-
-  Set<Path> getConfigFiles() {
-    return Collections.unmodifiableSet(configFiles);
-  }
-
-  Set<Path> getUserFiles() {
-    return Collections.unmodifiableSet(userFiles);
-  }
-
-  Set<Path> getDeployedJars() {
-    return Collections.unmodifiableSet(deployedJars);
-  }
-
-  Map<DiskStore, Path> getDiskInitFiles() {
-    return Collections.unmodifiableMap(diskInitFiles);
-  }
-
-  RestoreScript getRestoreScript() {
-    return restoreScript;
-  }
-
-  void addOplogFileToBackup(DiskStore diskStore, Path fileLocation) {
-    Set<Path> files = oplogFilesByDiskStore.computeIfAbsent(diskStore, k -> 
new HashSet<>());
-    files.add(fileLocation);
-  }
-}
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDestination.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDestination.java
deleted file mode 100644
index a84deea9a2..0000000000
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDestination.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
- * or implied. See the License for the specific language governing permissions 
and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.backup;
-
-import java.io.IOException;
-
-public interface BackupDestination {
-  String USER_FILES_DIRECTORY = "user";
-  String DEPLOYED_JARS_DIRECTORY = "user";
-  String CONFIG_DIRECTORY = "config";
-  String BACKUP_DIR_PREFIX = "dir";
-  String README_FILE = "README_FILE.txt";
-  String DATA_STORES_DIRECTORY = "diskstores";
-
-  void backupFiles(BackupDefinition backupDefinition) throws IOException;
-}
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
index 23ca4c1f86..f773b218f6 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
@@ -15,13 +15,10 @@
 package org.apache.geode.internal.cache.backup;
 
 import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
-import java.net.URISyntaxException;
 import java.net.URL;
 import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.StandardCopyOption;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -39,6 +36,7 @@
 import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.distributed.internal.MembershipListener;
 import 
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.ClassPathLoader;
@@ -50,45 +48,39 @@
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.cache.Oplog;
+import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.LogService;
 
 /**
  * This class manages the state an logic to backup a single cache.
  */
-public class BackupManager {
+public class BackupManager implements MembershipListener {
   private static final Logger logger = LogService.getLogger();
 
   static final String INCOMPLETE_BACKUP_FILE = "INCOMPLETE_BACKUP_FILE";
 
   private static final String BACKUP_DIR_PREFIX = "dir";
+  private static final String README_FILE = "README_FILE.txt";
   private static final String DATA_STORES_DIRECTORY = "diskstores";
-  public static final String DATA_STORES_TEMPORARY_DIRECTORY = "backupTemp_";
   private static final String USER_FILES = "user";
   private static final String CONFIG_DIRECTORY = "config";
 
-  private final MembershipListener membershipListener = new 
BackupMembershipListener();
   private final Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new 
HashMap<>();
   private final RestoreScript restoreScript = new RestoreScript();
   private final InternalDistributedMember sender;
   private final InternalCache cache;
   private final CountDownLatch allowDestroys = new CountDownLatch(1);
-  private final BackupDefinition backupDefinition = new BackupDefinition();
-  private final String diskStoreDirectoryName;
   private volatile boolean isCancelled = false;
-  private Path tempDirectory;
-  private final Map<DiskStore, Map<DirectoryHolder, Path>> 
diskStoreDirTempDirsByDiskStore =
-      new HashMap<>();
 
   public BackupManager(InternalDistributedMember sender, InternalCache 
gemFireCache) {
     this.sender = sender;
     this.cache = gemFireCache;
-    diskStoreDirectoryName = DATA_STORES_TEMPORARY_DIRECTORY + 
System.currentTimeMillis();
   }
 
   public void validateRequestingAdmin() {
     // We need to watch for pure admin guys that depart. this 
allMembershipListener set
     // looks like it should receive those events.
-    Set allIds = 
getDistributionManager().addAllMembershipListenerAndGetAllIds(membershipListener);
+    Set allIds = 
getDistributionManager().addAllMembershipListenerAndGetAllIds(this);
     if (!allIds.contains(sender)) {
       cleanup();
       throw new IllegalStateException("The admin member requesting a backup 
has already departed");
@@ -114,30 +106,52 @@ public void validateRequestingAdmin() {
       if (abort) {
         return new HashSet<>();
       }
-      tempDirectory = Files.createTempDirectory("backup_" + 
System.currentTimeMillis());
+      HashSet<PersistentID> persistentIds = new HashSet<>();
       File backupDir = getBackupDir(targetDir);
 
-      // Make sure our baseline is okay for this member, then create inspector 
for baseline backup
+      // Make sure our baseline is okay for this member
       baselineDir = checkBaseline(baselineDir);
+
+      // Create an inspector for the baseline backup
       BackupInspector inspector =
           (baselineDir == null ? null : 
BackupInspector.createInspector(baselineDir));
+
       File storesDir = new File(backupDir, DATA_STORES_DIRECTORY);
       Collection<DiskStore> diskStores = 
cache.listDiskStoresIncludingRegionOwned();
+      Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new HashMap<>();
+
+      boolean foundPersistentData = false;
+      for (DiskStore store : diskStores) {
+        DiskStoreImpl diskStore = (DiskStoreImpl) store;
+        if (diskStore.hasPersistedData()) {
+          if (!foundPersistentData) {
+            createBackupDir(backupDir);
+            foundPersistentData = true;
+          }
+          File diskStoreDir = new File(storesDir, getBackupDirName(diskStore));
+          diskStoreDir.mkdir();
+          DiskStoreBackup backup = startDiskStoreBackup(diskStore, 
diskStoreDir, inspector);
+          backupByDiskStore.put(diskStore, backup);
+        }
+        diskStore.releaseBackupLock();
+      }
 
-      Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStores =
-          startDiskStoreBackups(inspector, storesDir, diskStores);
       allowDestroys.countDown();
-      HashSet<PersistentID> persistentIds = 
finishDiskStoreBackups(backupByDiskStores);
 
-      if (!backupByDiskStores.isEmpty()) {
-        backupAdditionalFiles(backupDir);
-        backupDefinition.setRestoreScript(restoreScript);
+      for (Map.Entry<DiskStoreImpl, DiskStoreBackup> entry : 
backupByDiskStore.entrySet()) {
+        DiskStoreImpl diskStore = entry.getKey();
+        completeBackup(diskStore, entry.getValue());
+        diskStore.getStats().endBackup();
+        persistentIds.add(diskStore.getPersistentID());
       }
 
-      if (!backupByDiskStores.isEmpty()) {
-        // TODO: allow different stategies...
-        BackupDestination backupDestination = new 
FileSystemBackupDestination(backupDir.toPath());
-        backupDestination.backupFiles(backupDefinition);
+      if (!backupByDiskStore.isEmpty()) {
+        backupAdditionalFiles(backupDir);
+        restoreScript.generate(backupDir);
+        File incompleteFile = new File(backupDir, INCOMPLETE_BACKUP_FILE);
+        if (!incompleteFile.delete()) {
+          throw new IOException("Could not delete file " + 
INCOMPLETE_BACKUP_FILE);
+        }
       }
 
       return persistentIds;
@@ -147,50 +161,10 @@ public void validateRequestingAdmin() {
     }
   }
 
-  private HashSet<PersistentID> finishDiskStoreBackups(
-      Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStores) throws 
IOException {
-    HashSet<PersistentID> persistentIds = new HashSet<>();
-    for (Map.Entry<DiskStoreImpl, DiskStoreBackup> entry : 
backupByDiskStores.entrySet()) {
-      DiskStoreImpl diskStore = entry.getKey();
-      completeBackup(diskStore, entry.getValue());
-      diskStore.getStats().endBackup();
-      persistentIds.add(diskStore.getPersistentID());
-    }
-    return persistentIds;
-  }
-
-  private Map<DiskStoreImpl, DiskStoreBackup> 
startDiskStoreBackups(BackupInspector inspector,
-      File storesDir, Collection<DiskStore> diskStores) throws IOException {
-    Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new HashMap<>();
-
-    for (DiskStore store : diskStores) {
-      DiskStoreImpl diskStore = (DiskStoreImpl) store;
-      if (diskStore.hasPersistedData()) {
-        File diskStoreDir = new File(storesDir, getBackupDirName(diskStore));
-        DiskStoreBackup backup = startDiskStoreBackup(diskStore, diskStoreDir, 
inspector);
-        backupByDiskStore.put(diskStore, backup);
-      }
-      diskStore.releaseBackupLock();
-    }
-    return backupByDiskStore;
-  }
-
   public void abort() {
     cleanup();
   }
 
-  public boolean isCancelled() {
-    return isCancelled;
-  }
-
-  public void waitForBackup() {
-    try {
-      allowDestroys.await();
-    } catch (InterruptedException e) {
-      throw new InternalGemFireError(e);
-    }
-  }
-
   private DM getDistributionManager() {
     return cache.getInternalDistributedSystem().getDistributionManager();
   }
@@ -198,33 +172,11 @@ private DM getDistributionManager() {
   private void cleanup() {
     isCancelled = true;
     allowDestroys.countDown();
-    cleanupTemporaryFiles();
     releaseBackupLocks();
-    getDistributionManager().removeAllMembershipListener(membershipListener);
+    getDistributionManager().removeAllMembershipListener(this);
     cache.clearBackupManager();
   }
 
-  private void cleanupTemporaryFiles() {
-    if (tempDirectory != null) {
-      try {
-        FileUtils.deleteDirectory(tempDirectory.toFile());
-      } catch (IOException e) {
-        logger.warn("Unable to delete temporary directory created during 
backup, " + tempDirectory,
-            e);
-      }
-    }
-    for (Map<DirectoryHolder, Path> diskStoreDirToTempDirMap : 
diskStoreDirTempDirsByDiskStore
-        .values()) {
-      for (Path tempDir : diskStoreDirToTempDirMap.values()) {
-        try {
-          FileUtils.deleteDirectory(tempDir.toFile());
-        } catch (IOException e) {
-          logger.warn("Unable to delete temporary directory created during 
backup, " + tempDir, e);
-        }
-      }
-    }
-  }
-
   private void releaseBackupLocks() {
     for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
       ((DiskStoreImpl) store).releaseBackupLock();
@@ -241,7 +193,9 @@ private void releaseBackupLocks() {
   private File findBaselineForThisMember(File baselineParentDir) {
     File baselineDir = null;
 
-    // Find the first matching DiskStoreId directory for this member.
+    /*
+     * Find the first matching DiskStoreId directory for this member.
+     */
     for (DiskStore diskStore : cache.listDiskStoresIncludingRegionOwned()) {
       File[] matchingFiles = baselineParentDir
           .listFiles((file, name) -> 
name.endsWith(getBackupDirName((DiskStoreImpl) diskStore)));
@@ -289,9 +243,10 @@ private File checkBaseline(File baselineParentDir) throws 
IOException {
   }
 
   private void backupAdditionalFiles(File backupDir) throws IOException {
-    backupConfigFiles();
+    backupConfigFiles(backupDir);
     backupUserFiles(backupDir);
     backupDeployedJars(backupDir);
+
   }
 
   /**
@@ -311,7 +266,12 @@ private void completeBackup(DiskStoreImpl diskStore, 
DiskStoreBackup backup) thr
         if (isCancelled()) {
           break;
         }
-        copyOplog(diskStore, tempDirectory.toFile(), oplog);
+        // Copy theoplog to the destination directory
+        int index = oplog.getDirectoryHolder().getArrayIndex();
+        File backupDir = getBackupDir(backup.getTargetDir(), index);
+        // TODO prpersist - We could probably optimize this to *move* the files
+        // that we know are supposed to be deleted.
+        backupOplog(backupDir, oplog);
 
         // Allow the oplog to be deleted, and process any pending delete
         backup.backupFinished(oplog);
@@ -372,15 +332,15 @@ private DiskStoreBackup 
startDiskStoreBackup(DiskStoreImpl diskStore, File targe
             logger.debug("snapshotting oplogs for disk store {}", 
diskStore.getName());
           }
 
-          addDiskStoreDirectoriesToRestoreScript(diskStore, targetDir);
+          createDiskStoreBackupDirs(diskStore, targetDir);
 
           
restoreScript.addExistenceTest(diskStore.getDiskInitFile().getIFFile());
 
           // Contains all oplogs that will backed up
+          Oplog[] allOplogs = null;
 
           // Incremental backup so filter out oplogs that have already been
           // backed up
-          Oplog[] allOplogs;
           if (null != baselineInspector) {
             allOplogs = filterBaselineOplogs(diskStore, baselineInspector);
           } else {
@@ -392,13 +352,9 @@ private DiskStoreBackup startDiskStoreBackup(DiskStoreImpl 
diskStore, File targe
           backup = new DiskStoreBackup(allOplogs, targetDir);
           backupByDiskStore.put(diskStore, backup);
 
-
-          // TODO cleanup new location definition code
-          /*
-           * Path diskstoreDir = getBackupDir(tempDir.toFile(),
-           * diskStore.getInforFileDirIndex()).toPath(); 
Files.createDirectories(diskstoreDir);
-           */
-          backupDiskInitFile(diskStore, tempDirectory);
+          // copy the init file
+          File firstDir = getBackupDir(targetDir, 
diskStore.getInforFileDirIndex());
+          diskStore.getDiskInitFile().copyTo(firstDir);
           diskStore.getPersistentOplogSet().forceRoll(null);
 
           if (logger.isDebugEnabled()) {
@@ -417,20 +373,15 @@ private DiskStoreBackup 
startDiskStoreBackup(DiskStoreImpl diskStore, File targe
     return backup;
   }
 
-  private void backupDiskInitFile(DiskStoreImpl diskStore, Path tempDir) 
throws IOException {
-    File diskInitFile = diskStore.getDiskInitFile().getIFFile();
-    String subDir = Integer.toString(diskStore.getInforFileDirIndex());
-    Files.createDirectories(tempDir.resolve(subDir));
-    Files.copy(diskInitFile.toPath(), 
tempDir.resolve(subDir).resolve(diskInitFile.getName()),
-        StandardCopyOption.COPY_ATTRIBUTES);
-    backupDefinition.addDiskInitFile(diskStore,
-        tempDir.resolve(subDir).resolve(diskInitFile.getName()));
-  }
-
-  private void addDiskStoreDirectoriesToRestoreScript(DiskStoreImpl diskStore, 
File targetDir) {
+  private void createDiskStoreBackupDirs(DiskStoreImpl diskStore, File 
targetDir)
+      throws IOException {
+    // Create the directories for this disk store
     DirectoryHolder[] directories = diskStore.getDirectoryHolders();
     for (int i = 0; i < directories.length; i++) {
       File backupDir = getBackupDir(targetDir, i);
+      if (!backupDir.mkdirs()) {
+        throw new IOException("Could not create directory " + backupDir);
+      }
       restoreScript.addFile(directories[i].getDir(), backupDir);
     }
   }
@@ -442,7 +393,8 @@ private void 
addDiskStoreDirectoriesToRestoreScript(DiskStoreImpl diskStore, Fil
    * @param baselineInspector the inspector for the previous backup.
    * @return an array of Oplogs to be copied for an incremental backup.
    */
-  private Oplog[] filterBaselineOplogs(DiskStoreImpl diskStore, 
BackupInspector baselineInspector) {
+  private Oplog[] filterBaselineOplogs(DiskStoreImpl diskStore, 
BackupInspector baselineInspector)
+      throws IOException {
     File baselineDir =
         new File(baselineInspector.getBackupDir(), 
BackupManager.DATA_STORES_DIRECTORY);
     baselineDir = new File(baselineDir, getBackupDirName(diskStore));
@@ -457,7 +409,9 @@ private void 
addDiskStoreDirectoriesToRestoreScript(DiskStoreImpl diskStore, Fil
     // Total list of member oplogs
     Oplog[] allOplogs = diskStore.getAllOplogsForBackup();
 
-    // Loop through operation logs and see if they are already part of the 
baseline backup.
+    /*
+     * Loop through operation logs and see if they are already part of the 
baseline backup.
+     */
     for (Oplog log : allOplogs) {
       // See if they are backed up in the current baseline
       Map<File, File> oplogMap = log.mapBaseline(baselineOplogFiles);
@@ -468,7 +422,9 @@ private void 
addDiskStoreDirectoriesToRestoreScript(DiskStoreImpl diskStore, Fil
       }
 
       if (oplogMap.isEmpty()) {
-        // These are fresh operation log files so lets back them up.
+        /*
+         * These are fresh operation log files so lets back them up.
+         */
         oplogList.add(log);
       } else {
         /*
@@ -499,42 +455,42 @@ private File getBackupDir(File targetDir, int index) {
     return new File(targetDir, BACKUP_DIR_PREFIX + index);
   }
 
-  private void backupConfigFiles() throws IOException {
-    Files.createDirectories(tempDirectory.resolve(CONFIG_DIRECTORY));
-    addConfigFileToBackup(cache.getCacheXmlURL());
-    addConfigFileToBackup(DistributedSystem.getPropertiesFileURL());
-    // TODO: should the gfsecurity.properties file be backed up?
-  }
+  private void backupConfigFiles(File backupDir) throws IOException {
+    File configBackupDir = new File(backupDir, CONFIG_DIRECTORY);
+    configBackupDir.mkdirs();
+    URL url = cache.getCacheXmlURL();
+    if (url != null) {
+      File cacheXMLBackup =
+          new File(configBackupDir, 
DistributionConfig.DEFAULT_CACHE_XML_FILE.getName());
+      FileUtils.copyFile(new File(cache.getCacheXmlURL().getFile()), 
cacheXMLBackup);
+    }
 
-  private void addConfigFileToBackup(URL fileUrl) throws IOException {
-    if (fileUrl != null) {
-      try {
-        Path source = Paths.get(fileUrl.toURI());
-        Path destination = 
tempDirectory.resolve(CONFIG_DIRECTORY).resolve(source.getFileName());
-        Files.copy(source, destination, StandardCopyOption.COPY_ATTRIBUTES);
-        backupDefinition.addConfigFileToBackup(destination);
-      } catch (URISyntaxException e) {
-        throw new IOException(e);
-      }
+    URL propertyURL = DistributedSystem.getPropertiesFileURL();
+    if (propertyURL != null) {
+      File propertyBackup =
+          new File(configBackupDir, DistributionConfig.GEMFIRE_PREFIX + 
"properties");
+      FileUtils.copyFile(new File(DistributedSystem.getPropertiesFile()), 
propertyBackup);
     }
+
+    // TODO: should the gfsecurity.properties file be backed up?
   }
 
   private void backupUserFiles(File backupDir) throws IOException {
-    Files.createDirectories(tempDirectory.resolve(USER_FILES));
     List<File> backupFiles = cache.getBackupFiles();
     File userBackupDir = new File(backupDir, USER_FILES);
+    if (!userBackupDir.exists()) {
+      userBackupDir.mkdir();
+    }
     for (File original : backupFiles) {
       if (original.exists()) {
         original = original.getAbsoluteFile();
-        Path destination = 
tempDirectory.resolve(USER_FILES).resolve(original.getName());
+        File dest = new File(userBackupDir, original.getName());
+        restoreScript.addUserFile(original, dest);
         if (original.isDirectory()) {
-          FileUtils.copyDirectory(original, destination.toFile());
+          FileUtils.copyDirectory(original, dest);
         } else {
-          Files.copy(original.toPath(), destination, 
StandardCopyOption.COPY_ATTRIBUTES);
+          FileUtils.copyFile(original, dest);
         }
-        backupDefinition.addUserFilesToBackup(destination);
-        File restoreScriptDestination = new File(userBackupDir, 
original.getName());
-        restoreScript.addUserFile(original, restoreScriptDestination);
       }
     }
   }
@@ -549,34 +505,41 @@ private void backupDeployedJars(File backupDir) throws 
IOException {
     JarDeployer deployer = null;
 
     try {
-      // Suspend any user deployed jar file updates during this backup.
+      /*
+       * Suspend any user deployed jar file updates during this backup.
+       */
       deployer = ClassPathLoader.getLatest().getJarDeployer();
       deployer.suspendAll();
 
       List<DeployedJar> jarList = deployer.findDeployedJars();
       if (!jarList.isEmpty()) {
         File userBackupDir = new File(backupDir, USER_FILES);
+        if (!userBackupDir.exists()) {
+          userBackupDir.mkdir();
+        }
 
-        for (DeployedJar jar : jarList) {
-          File source = new File(jar.getFileCanonicalPath());
-          String sourceFileName = source.getName();
-          Path destination = 
tempDirectory.resolve(USER_FILES).resolve(sourceFileName);
-          Files.copy(source.toPath(), destination, 
StandardCopyOption.COPY_ATTRIBUTES);
-          backupDefinition.addDeployedJarToBackup(destination);
-
-          File restoreScriptDestination = new File(userBackupDir, 
sourceFileName);
-          restoreScript.addFile(source, restoreScriptDestination);
+        for (DeployedJar loader : jarList) {
+          File source = new File(loader.getFileCanonicalPath());
+          File dest = new File(userBackupDir, source.getName());
+          restoreScript.addFile(source, dest);
+          if (source.isDirectory()) {
+            FileUtils.copyDirectory(source, dest);
+          } else {
+            FileUtils.copyFile(source, dest);
+          }
         }
       }
     } finally {
-      // Re-enable user deployed jar file updates.
-      if (deployer != null) {
+      /*
+       * Re-enable user deployed jar file updates.
+       */
+      if (null != deployer) {
         deployer.resumeAll();
       }
     }
   }
 
-  private File getBackupDir(File targetDir) {
+  private File getBackupDir(File targetDir) throws IOException {
     InternalDistributedMember memberId =
         cache.getInternalDistributedSystem().getDistributedMember();
     String vmId = memberId.toString();
@@ -584,22 +547,47 @@ private File getBackupDir(File targetDir) {
     return new File(targetDir, vmId);
   }
 
-  private void copyOplog(DiskStore diskStore, File targetDir, Oplog oplog) 
throws IOException {
-    DirectoryHolder dirHolder = oplog.getDirectoryHolder();
-    backupFile(diskStore, dirHolder, targetDir, oplog.getCrfFile());
-    backupFile(diskStore, dirHolder, targetDir, oplog.getDrfFile());
+  private void createBackupDir(File backupDir) throws IOException {
+    if (backupDir.exists()) {
+      throw new IOException("Backup directory " + backupDir.getAbsolutePath() 
+ " already exists.");
+    }
+
+    if (!backupDir.mkdirs()) {
+      throw new IOException("Could not create directory: " + backupDir);
+    }
+
+    File incompleteFile = new File(backupDir, INCOMPLETE_BACKUP_FILE);
+    if (!incompleteFile.createNewFile()) {
+      throw new IOException("Could not create file: " + incompleteFile);
+    }
+
+    File readme = new File(backupDir, README_FILE);
+    FileOutputStream fos = new FileOutputStream(readme);
+
+    try {
+      String text = LocalizedStrings.BackupManager_README.toLocalizedString();
+      fos.write(text.getBytes());
+    } finally {
+      fos.close();
+    }
+  }
+
+  private void backupOplog(File targetDir, Oplog oplog) throws IOException {
+    File crfFile = oplog.getCrfFile();
+    backupFile(targetDir, crfFile);
+
+    File drfFile = oplog.getDrfFile();
+    backupFile(targetDir, drfFile);
 
     oplog.finishKrf();
-    backupFile(diskStore, dirHolder, targetDir, oplog.getKrfFile());
+    File krfFile = oplog.getKrfFile();
+    backupFile(targetDir, krfFile);
   }
 
-  private void backupFile(DiskStore diskStore, DirectoryHolder dirHolder, File 
targetDir, File file)
-      throws IOException {
+  private void backupFile(File targetDir, File file) throws IOException {
     if (file != null && file.exists()) {
       try {
-        Path tempDiskDir = getTempDirForDiskStore(diskStore, dirHolder);
-        Files.createLink(tempDiskDir.resolve(file.getName()), file.toPath());
-        backupDefinition.addOplogFileToBackup(diskStore, 
tempDiskDir.resolve(file.getName()));
+        Files.createLink(targetDir.toPath().resolve(file.getName()), 
file.toPath());
       } catch (IOException | UnsupportedOperationException e) {
         logger.warn("Unable to create hard link for + {}. Reverting to file 
copy", targetDir);
         FileUtils.copyFileToDirectory(file, targetDir);
@@ -607,55 +595,35 @@ private void backupFile(DiskStore diskStore, 
DirectoryHolder dirHolder, File tar
     }
   }
 
-  private Path getTempDirForDiskStore(DiskStore diskStore, DirectoryHolder 
dirHolder)
-      throws IOException {
-    Map<DirectoryHolder, Path> tempDirByDirectoryHolder =
-        diskStoreDirTempDirsByDiskStore.get(diskStore);
-    if (tempDirByDirectoryHolder == null) {
-      tempDirByDirectoryHolder = new HashMap<>();
-      diskStoreDirTempDirsByDiskStore.put(diskStore, tempDirByDirectoryHolder);
-    }
-    Path directory = tempDirByDirectoryHolder.get(dirHolder);
-    if (directory != null) {
-      return directory;
-    }
-
-    File diskStoreDir = dirHolder.getDir();
-    directory = diskStoreDir.toPath().resolve(diskStoreDirectoryName);
-    Files.createDirectories(directory);
-    tempDirByDirectoryHolder.put(dirHolder, directory);
-    return directory;
-  }
-
   private String cleanSpecialCharacters(String string) {
     return string.replaceAll("[^\\w]+", "_");
   }
 
-  public DiskStoreBackup getBackupForDiskStore(DiskStoreImpl diskStore) {
-    return backupByDiskStore.get(diskStore);
+  public void memberDeparted(InternalDistributedMember id, boolean crashed) {
+    cleanup();
   }
 
-  private class BackupMembershipListener implements MembershipListener {
-    @Override
-    public void memberDeparted(InternalDistributedMember id, boolean crashed) {
-      cleanup();
-    }
+  public void memberJoined(InternalDistributedMember id) {}
 
-    @Override
-    public void memberJoined(InternalDistributedMember id) {
-      // unused
-    }
+  public void quorumLost(Set<InternalDistributedMember> failures,
+      List<InternalDistributedMember> remaining) {}
 
-    @Override
-    public void quorumLost(Set<InternalDistributedMember> failures,
-        List<InternalDistributedMember> remaining) {
-      // unused
-    }
+  public void memberSuspect(InternalDistributedMember id, 
InternalDistributedMember whoSuspected,
+      String reason) {}
 
-    @Override
-    public void memberSuspect(InternalDistributedMember id, 
InternalDistributedMember whoSuspected,
-        String reason) {
-      // unused
+  public void waitForBackup() {
+    try {
+      allowDestroys.await();
+    } catch (InterruptedException e) {
+      throw new InternalGemFireError(e);
     }
   }
+
+  public boolean isCancelled() {
+    return isCancelled;
+  }
+
+  public DiskStoreBackup getBackupForDiskStore(DiskStoreImpl diskStore) {
+    return backupByDiskStore.get(diskStore);
+  }
 }
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestination.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestination.java
deleted file mode 100644
index 997ab10323..0000000000
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestination.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
- * or implied. See the License for the specific language governing permissions 
and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.backup;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
-import java.util.Collection;
-import java.util.Map;
-
-import org.apache.commons.io.FileUtils;
-
-import org.apache.geode.cache.DiskStore;
-import org.apache.geode.internal.cache.DiskStoreImpl;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.i18n.LocalizedStrings;
-
-public class FileSystemBackupDestination implements BackupDestination {
-  static final String INCOMPLETE_BACKUP_FILE = "INCOMPLETE_BACKUP_FILE";
-
-  private final Path backupDir;
-
-  FileSystemBackupDestination(Path backupDir) {
-    this.backupDir = backupDir;
-  }
-
-  @Override
-  public void backupFiles(BackupDefinition backupDefinition) throws 
IOException {
-    Files.createDirectories(backupDir);
-    Files.createFile(backupDir.resolve(INCOMPLETE_BACKUP_FILE));
-    backupAllFilesets(backupDefinition);
-    Files.delete(backupDir.resolve(INCOMPLETE_BACKUP_FILE));
-  }
-
-  private void backupAllFilesets(BackupDefinition backupDefinition) throws 
IOException {
-    backupUserFiles(backupDefinition.getUserFiles());
-    backupDeployedJars(backupDefinition.getDeployedJars());
-    backupConfigFiles(backupDefinition.getConfigFiles());
-    backupOplogs(backupDefinition.getOplogFilesByDiskStore());
-    backupDiskInitFiles(backupDefinition.getDiskInitFiles());
-    RestoreScript script = backupDefinition.getRestoreScript();
-    if (script != null) {
-      File scriptFile = script.generate(backupDir.toFile());
-      backupRestoreScript(scriptFile.toPath());
-    }
-    writeReadMe();
-  }
-
-  private void writeReadMe() throws IOException {
-    String text = LocalizedStrings.BackupManager_README.toLocalizedString();
-    Files.write(backupDir.resolve(README_FILE), text.getBytes());
-  }
-
-  private void backupRestoreScript(Path restoreScriptFile) throws IOException {
-    Files.copy(restoreScriptFile, 
backupDir.resolve(restoreScriptFile.getFileName()));
-  }
-
-  private void backupDiskInitFiles(Map<DiskStore, Path> diskInitFiles) throws 
IOException {
-    for (Map.Entry<DiskStore, Path> entry : diskInitFiles.entrySet()) {
-      Path destinationDirectory = getOplogBackupDir(entry.getKey(),
-          ((DiskStoreImpl) entry.getKey()).getInforFileDirIndex());
-      
Files.createDirectories(destinationDirectory.resolve(destinationDirectory));
-      Files.copy(entry.getValue(), 
destinationDirectory.resolve(destinationDirectory)
-          .resolve(entry.getValue().getFileName()), 
StandardCopyOption.COPY_ATTRIBUTES);
-    }
-  }
-
-  private void backupUserFiles(Collection<Path> userFiles) throws IOException {
-    Path userDirectory = backupDir.resolve(USER_FILES_DIRECTORY);
-    Files.createDirectories(userDirectory);
-    moveFilesOrDirectories(userFiles, userDirectory);
-  }
-
-  private void backupDeployedJars(Collection<Path> jarFiles) throws 
IOException {
-    Path jarsDirectory = backupDir.resolve(DEPLOYED_JARS_DIRECTORY);
-    Files.createDirectories(jarsDirectory);
-    moveFilesOrDirectories(jarFiles, jarsDirectory);
-  }
-
-  private void backupConfigFiles(Collection<Path> configFiles) throws 
IOException {
-    Path configDirectory = backupDir.resolve(CONFIG_DIRECTORY);
-    Files.createDirectories(configDirectory);
-    moveFilesOrDirectories(configFiles, configDirectory);
-  }
-
-  private void backupOplogs(Map<DiskStore, Collection<Path>> oplogFiles) 
throws IOException {
-    for (Map.Entry<DiskStore, Collection<Path>> entry : oplogFiles.entrySet()) 
{
-      for (Path path : entry.getValue()) {
-        int index = ((DiskStoreImpl) entry.getKey()).getInforFileDirIndex();
-        Path backupDir = createOplogBackupDir(entry.getKey(), index);
-        backupOplog(backupDir, path);
-      }
-    }
-  }
-
-  private Path getOplogBackupDir(DiskStore diskStore, int index) {
-    String name = diskStore.getName();
-    if (name == null) {
-      name = GemFireCacheImpl.getDefaultDiskStoreName();
-    }
-    name = name + "_" + ((DiskStoreImpl) 
diskStore).getDiskStoreID().toString();
-    return backupDir.resolve(DATA_STORES_DIRECTORY).resolve(name)
-        .resolve(BACKUP_DIR_PREFIX + index);
-  }
-
-  private Path createOplogBackupDir(DiskStore diskStore, int index) throws 
IOException {
-    Path oplogBackupDir = getOplogBackupDir(diskStore, index);
-    Files.createDirectories(oplogBackupDir);
-    return oplogBackupDir;
-  }
-
-  private void backupOplog(Path targetDir, Path path) throws IOException {
-    backupFile(targetDir, path.toFile());
-  }
-
-  private void backupFile(Path targetDir, File file) throws IOException {
-    Files.move(file.toPath(), targetDir.resolve(file.getName()));
-  }
-
-  private void moveFilesOrDirectories(Collection<Path> paths, Path 
targetDirectory)
-      throws IOException {
-    for (Path userFile : paths) {
-      Path destination = targetDirectory.resolve(userFile.getFileName());
-      if (Files.isDirectory(userFile)) {
-        FileUtils.moveDirectory(userFile.toFile(), destination.toFile());
-      } else {
-        Files.move(userFile, destination);
-      }
-    }
-  }
-}
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskFactory.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskFactory.java
index d5dfc9a327..9d919973db 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskFactory.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskFactory.java
@@ -14,8 +14,10 @@
  */
 package org.apache.geode.internal.cache.backup;
 
+import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.internal.DM;
 import 
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.InternalCache;
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskOperation.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskOperation.java
index 76fa279169..afeda08b2d 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskOperation.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskOperation.java
@@ -19,11 +19,14 @@
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.CancelException;
+import org.apache.geode.cache.DiskStore;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.ReplyException;
 import org.apache.geode.distributed.internal.ReplyProcessor21;
 import 
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.admin.remote.AdminResponse;
+import org.apache.geode.internal.admin.remote.CliLegacyMessage;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.logging.LogService;
 
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
index a166e93bc2..cd9d1600d3 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
@@ -77,7 +77,7 @@ public void addExistenceTest(final File originalFile) {
     existenceTests.add(originalFile.getAbsoluteFile());
   }
 
-  public File generate(final File outputDir) throws IOException {
+  public void generate(final File outputDir) throws IOException {
     File outputFile = new File(outputDir, generator.getScriptName());
     try (BufferedWriter writer = Files.newBufferedWriter(outputFile.toPath())) 
{
 
@@ -90,7 +90,6 @@ public File generate(final File outputDir) throws IOException 
{
     }
 
     outputFile.setExecutable(true, true);
-    return outputFile;
   }
 
   private void writePreamble(BufferedWriter writer) throws IOException {
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
index 4cc22e25e7..ee9a53d7f5 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
@@ -18,6 +18,9 @@
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.geode.internal.cache.backup.RestoreScript;
+import org.apache.geode.internal.cache.backup.ScriptGenerator;
+
 class UnixScriptGenerator implements ScriptGenerator {
 
   private static final String SCRIPT_FILE_NAME = "restore.sh";
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/DiskStoreImplIntegrationTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/DiskStoreImplIntegrationTest.java
deleted file mode 100644
index 8e9cf6d5b2..0000000000
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/DiskStoreImplIntegrationTest.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
- * or implied. See the License for the specific language governing permissions 
and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-import java.io.File;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.cache.DiskStore;
-import org.apache.geode.cache.RegionShortcut;
-import org.apache.geode.distributed.ConfigurationProperties;
-import org.apache.geode.internal.cache.backup.BackupManager;
-import org.apache.geode.test.junit.categories.IntegrationTest;
-
-@Category(IntegrationTest.class)
-public class DiskStoreImplIntegrationTest {
-  private static final String DISK_STORE_NAME = "testDiskStore";
-  private static final String REGION_NAME = "testRegion";
-
-  @Rule
-  public TemporaryFolder temporaryDirectory = new TemporaryFolder();
-
-  private Cache cache;
-
-  @Before
-  public void setup() {
-    cache = createCache();
-  }
-
-  @After
-  public void tearDown() {
-    if (cache != null && !cache.isClosed()) {
-      cache.close();
-    }
-  }
-
-  @Test
-  public void cleansUpOrphanedBackupFilesOnDiskStoreCreation() throws 
Exception {
-    File baseDir = temporaryDirectory.newFolder();
-    createRegionWithDiskStore(baseDir);
-    DiskStore diskStore = cache.findDiskStore(DISK_STORE_NAME);
-
-    List<Path> tempDirs = new ArrayList<>();
-    for (File diskDir : diskStore.getDiskDirs()) {
-      Path tempDir =
-          
diskDir.toPath().resolve(BackupManager.DATA_STORES_TEMPORARY_DIRECTORY + 
"testing");
-      Files.createDirectories(tempDir);
-      tempDirs.add(tempDir);
-    }
-
-    cache.close();
-    cache = createCache();
-    createRegionWithDiskStore(baseDir);
-
-    tempDirs.forEach(tempDir -> assertThat(Files.exists(tempDir)).isFalse());
-  }
-
-  private void createRegionWithDiskStore(File baseDir) {
-    cache.createDiskStoreFactory().setDiskDirs(new File[] 
{baseDir}).create(DISK_STORE_NAME);
-    cache.<String, 
String>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
-        .setDiskStoreName(DISK_STORE_NAME).create(REGION_NAME);
-  }
-
-  private Cache createCache() {
-    // Setting MCAST port explicitly is currently required due to default 
properties set in gradle
-    return new CacheFactory().set(ConfigurationProperties.MCAST_PORT, 
"0").create();
-  }
-}
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDefinitionTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDefinitionTest.java
deleted file mode 100644
index 5ca9a2b19e..0000000000
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDefinitionTest.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
- * or implied. See the License for the specific language governing permissions 
and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.backup;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.mockito.Mockito.mock;
-
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Collections;
-
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.cache.DiskStore;
-import org.apache.geode.test.junit.categories.UnitTest;
-
-@Category(UnitTest.class)
-public class BackupDefinitionTest {
-
-  private BackupDefinition backupDefinition = new BackupDefinition();
-
-  @Test
-  public void hasNoFilesWhenInitialized() {
-    assertThat(backupDefinition.getConfigFiles()).isEmpty();
-    assertThat(backupDefinition.getDeployedJars()).isEmpty();
-    assertThat(backupDefinition.getUserFiles()).isEmpty();
-    assertThat(backupDefinition.getOplogFilesByDiskStore()).isEmpty();
-    assertThat(backupDefinition.getDiskInitFiles()).isEmpty();
-    assertThat(backupDefinition.getRestoreScript()).isNull();
-  }
-
-  @Test
-  public void returnsNonModifiableCollections() {
-    Path cannotBeAdded = Paths.get("");
-    assertThatThrownBy(() -> 
backupDefinition.getConfigFiles().add(cannotBeAdded))
-        .isInstanceOf(UnsupportedOperationException.class);
-    assertThatThrownBy(() -> 
backupDefinition.getDeployedJars().add(cannotBeAdded))
-        .isInstanceOf(UnsupportedOperationException.class);
-    assertThatThrownBy(() -> 
backupDefinition.getUserFiles().add(cannotBeAdded))
-        .isInstanceOf(UnsupportedOperationException.class);
-    assertThatThrownBy(() -> 
backupDefinition.getOplogFilesByDiskStore().put(mock(DiskStore.class),
-        
Collections.emptySet())).isInstanceOf(UnsupportedOperationException.class);
-    assertThatThrownBy(
-        () -> backupDefinition.getDiskInitFiles().put(mock(DiskStore.class), 
cannotBeAdded))
-            .isInstanceOf(UnsupportedOperationException.class);
-  }
-
-  @Test
-  public void containsConfigFilesAdded() {
-    Path config1 = Paths.get("config1");
-    Path config2 = Paths.get("config2");
-    backupDefinition.addConfigFileToBackup(config1);
-    backupDefinition.addConfigFileToBackup(config2);
-    assertThat(backupDefinition.getConfigFiles()).containsOnly(config1, 
config2);
-  }
-
-  @Test
-  public void containsDeployedJarFilesAdded() {
-    Path jar1 = Paths.get("jar1");
-    Path jar2 = Paths.get("jar2");
-    backupDefinition.addDeployedJarToBackup(jar1);
-    backupDefinition.addDeployedJarToBackup(jar2);
-    assertThat(backupDefinition.getDeployedJars()).containsOnly(jar1, jar2);
-  }
-
-  @Test
-  public void containsUserFilesAdded() {
-    Path userFile1 = Paths.get("userFile1");
-    Path userFile2 = Paths.get("userFile2");
-    backupDefinition.addUserFilesToBackup(userFile1);
-    backupDefinition.addUserFilesToBackup(userFile2);
-    assertThat(backupDefinition.getUserFiles()).containsOnly(userFile1, 
userFile2);
-  }
-
-  @Test
-  public void containsAllAddedOplogFilesAdded() {
-    DiskStore diskStore = mock(DiskStore.class);
-    Path file1 = mock(Path.class);
-    Path file2 = mock(Path.class);
-    backupDefinition.addOplogFileToBackup(diskStore, file1);
-    
assertThat(backupDefinition.getOplogFilesByDiskStore()).containsEntry(diskStore,
-        Collections.singleton(file1));
-    backupDefinition.addOplogFileToBackup(diskStore, file2);
-    assertThat(backupDefinition.getOplogFilesByDiskStore().get(diskStore))
-        .containsExactlyInAnyOrder(file1, file2);
-  }
-
-  @Test
-  public void containsAllDiskInitFiles() {
-    DiskStore diskStore1 = mock(DiskStore.class);
-    DiskStore diskStore2 = mock(DiskStore.class);
-    Path diskInit1 = Paths.get("diskInit1");
-    Path diskInit2 = Paths.get("diskInit2");
-    backupDefinition.addDiskInitFile(diskStore1, diskInit1);
-    backupDefinition.addDiskInitFile(diskStore2, diskInit2);
-    
assertThat(backupDefinition.getDiskInitFiles()).hasSize(2).containsValues(diskInit1,
 diskInit2);
-  }
-
-  @Test
-  public void hasSetRestoreScript() {
-    RestoreScript restoreScript = new RestoreScript();
-    backupDefinition.setRestoreScript(restoreScript);
-    assertThat(backupDefinition.getRestoreScript()).isSameAs(restoreScript);
-  }
-}
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
index a5ffba5e18..77ee71952d 100755
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
@@ -14,25 +14,26 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.apache.commons.io.FileUtils.listFiles;
-import static org.apache.commons.io.filefilter.DirectoryFileFilter.DIRECTORY;
 import static org.apache.geode.test.dunit.Host.getHost;
 import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
 import java.io.IOException;
-import java.io.InputStreamReader;
+import java.io.PrintStream;
+import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.CompletableFuture;
@@ -44,27 +45,22 @@
 import junitparams.Parameters;
 import junitparams.naming.TestCaseName;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.RegexFileFilter;
-import org.apache.logging.log4j.Logger;
 import org.junit.Before;
-import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 
 import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheClosedException;
+import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.DiskStore;
+import org.apache.geode.cache.DiskStoreFactory;
 import org.apache.geode.cache.EvictionAction;
 import org.apache.geode.cache.EvictionAttributes;
 import org.apache.geode.cache.PartitionAttributesFactory;
-import org.apache.geode.cache.PartitionedRegionStorageException;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionFactory;
-import org.apache.geode.cache.RegionShortcut;
 import org.apache.geode.cache.control.RebalanceOperation;
 import org.apache.geode.cache.control.RebalanceResults;
-import org.apache.geode.cache.persistence.PartitionOfflineException;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.distributed.internal.DistributionMessage;
@@ -72,16 +68,17 @@
 import org.apache.geode.distributed.internal.ReplyMessage;
 import org.apache.geode.internal.admin.remote.AdminFailureResponse;
 import 
org.apache.geode.internal.cache.DestroyRegionOperation.DestroyRegionMessage;
+import org.apache.geode.internal.cache.DiskRegion;
 import org.apache.geode.internal.cache.GemFireCacheImpl;
 import org.apache.geode.internal.cache.PartitionedRegion;
 import 
org.apache.geode.internal.cache.partitioned.PersistentPartitionedRegionTestBase;
-import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.management.BackupStatus;
-import org.apache.geode.management.ManagementException;
 import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.DUnitEnv;
+import org.apache.geode.test.dunit.SerializableCallable;
+import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.junit.categories.DistributedTest;
-import 
org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
 
 /**
  * Additional tests to consider adding:
@@ -95,19 +92,11 @@
 @RunWith(JUnitParamsRunner.class)
 @SuppressWarnings("serial")
 public class BackupDistributedTest extends PersistentPartitionedRegionTestBase 
{
-  private static final Logger logger = LogService.getLogger();
-
-  private static final int NUM_BUCKETS = 15;
-
-  @Rule
-  public SerializableTemporaryFolder tempDir = new 
SerializableTemporaryFolder();
 
   private VM vm0;
   private VM vm1;
   private VM vm2;
   private VM vm3;
-  private Map<VM, File> workingDirByVm;
-  private File backupBaseDir;
 
   @Before
   public void setUp() throws Exception {
@@ -115,15 +104,6 @@ public void setUp() throws Exception {
     vm1 = getHost(0).getVM(1);
     vm2 = getHost(0).getVM(2);
     vm3 = getHost(0).getVM(3);
-
-    workingDirByVm = new HashMap<>();
-    workingDirByVm.put(vm0, tempDir.newFolder());
-    workingDirByVm.put(vm1, tempDir.newFolder());
-    workingDirByVm.put(vm2, tempDir.newFolder());
-    workingDirByVm.put(vm3, tempDir.newFolder());
-
-    backupBaseDir = tempDir.newFolder("backupDir");
-
   }
 
   @Override
@@ -132,22 +112,30 @@ public final void preTearDownCacheTestCase() throws 
Exception {
       DistributionMessageObserver.setInstance(null);
       disconnectFromDS();
     });
+
+    StringBuilder failures = new StringBuilder();
+    delete(getBackupDir(), failures);
+    if (failures.length() > 0) {
+      // logger.error(failures.toString());
+    }
   }
 
   @Test
   public void testBackupPR() throws Exception {
-    createPersistentRegions();
+    createPersistentRegion(vm0);
+    createPersistentRegion(vm1);
 
-    long lastModified0 = setBackupFiles(vm0);
-    long lastModified1 = setBackupFiles(vm1);
+    long lm0 = setBackupFiles(vm0);
+    long lm1 = setBackupFiles(vm1);
 
-    createData();
+    createData(vm0, 0, 5, "A", "region1");
+    createData(vm0, 0, 5, "B", "region2");
 
-    BackupStatus status = backupMember(vm2);
+    BackupStatus status = backup(vm2);
     assertThat(status.getBackedUpDiskStores()).hasSize(2);
     assertThat(status.getOfflineDiskStores()).isEmpty();
 
-    Collection<File> files = FileUtils.listFiles(backupBaseDir, new String[] 
{"txt"}, true);
+    Collection<File> files = FileUtils.listFiles(getBackupDir(), new String[] 
{"txt"}, true);
     assertThat(files).hasSize(4);
 
     deleteOldUserUserFile(vm0);
@@ -168,17 +156,12 @@ public void testBackupPR() throws Exception {
 
     restoreBackup(2);
 
-    createPersistentRegions();
+    createPersistentRegionsAsync();
 
     checkData(vm0, 0, 5, "A", "region1");
     checkData(vm0, 0, 5, "B", "region2");
-    verifyUserFileRestored(vm0, lastModified0);
-    verifyUserFileRestored(vm1, lastModified1);
-  }
-
-  private void createData() {
-    createData(vm0, 0, 5, "A", "region1");
-    createData(vm0, 0, 5, "B", "region2");
+    verifyUserFileRestored(vm0, lm0);
+    verifyUserFileRestored(vm1, lm1);
   }
 
   /**
@@ -190,11 +173,13 @@ private void createData() {
    */
   @Test
   public void testBackupFromMemberWithDiskStore() throws Exception {
-    createPersistentRegions();
+    createPersistentRegion(vm0);
+    createPersistentRegion(vm1);
 
-    createData();
+    createData(vm0, 0, 5, "A", "region1");
+    createData(vm0, 0, 5, "B", "region2");
 
-    BackupStatus status = backupMember(vm1);
+    BackupStatus status = backup(vm1);
     assertThat(status.getBackedUpDiskStores()).hasSize(2);
 
     for (DistributedMember key : status.getBackedUpDiskStores().keySet()) {
@@ -208,11 +193,11 @@ public void testBackupFromMemberWithDiskStore() throws 
Exception {
     closeCache(vm1);
 
     // destroy the current data
-    cleanDiskDirsInEveryVM();
+    invokeInEveryVM("cleanDiskDirs", () -> cleanDiskDirs());
 
     restoreBackup(2);
 
-    createPersistentRegions();
+    createPersistentRegionsAsync();
 
     checkData(vm0, 0, 5, "A", "region1");
     checkData(vm0, 0, 5, "B", "region2");
@@ -227,16 +212,16 @@ public void testBackupFromMemberWithDiskStore() throws 
Exception {
    */
   @Test
   public void testBackupWhileBucketIsCreated() throws Exception {
-    createPersistentRegion(vm0).await();
+    createPersistentRegion(vm0);
 
     // create a bucket on vm0
     createData(vm0, 0, 1, "A", "region1");
 
     // create the pr on vm1, which won't have any buckets
-    createPersistentRegion(vm1).await();
+    createPersistentRegion(vm1);
 
     CompletableFuture<BackupStatus> backupStatusFuture =
-        CompletableFuture.supplyAsync(() -> backupMember(vm2));
+        CompletableFuture.supplyAsync(() -> backup(vm2));
     CompletableFuture<Void> createDataFuture =
         CompletableFuture.runAsync(() -> createData(vm0, 1, 5, "A", 
"region1"));
     CompletableFuture.allOf(backupStatusFuture, createDataFuture);
@@ -256,11 +241,11 @@ public void testBackupWhileBucketIsCreated() throws 
Exception {
     closeCache(vm1);
 
     // destroy the current data
-    cleanDiskDirsInEveryVM();
+    invokeInEveryVM("cleanDiskDirs", () -> cleanDiskDirs());
 
     restoreBackup(2);
 
-    createPersistentRegions();
+    createPersistentRegionsAsync();
 
     checkData(vm0, 0, 1, "A", "region1");
   }
@@ -286,13 +271,13 @@ public void testWhileBucketIsMovedBackup(final 
WhenToInvokeBackup whenToInvokeBa
       
DistributionMessageObserver.setInstance(createTestHookToBackup(whenToInvokeBackup));
     });
 
-    createPersistentRegion(vm0).await();
+    createPersistentRegion(vm0);
 
     // create twos bucket on vm0
     createData(vm0, 0, 2, "A", "region1");
 
     // create the pr on vm1, which won't have any buckets
-    createPersistentRegion(vm1).await();
+    createPersistentRegion(vm1);
 
     // Perform a rebalance. This will trigger the backup in the middle of the 
bucket move.
     vm0.invoke("Do rebalance", () -> {
@@ -314,11 +299,13 @@ public void testWhileBucketIsMovedBackup(final 
WhenToInvokeBackup whenToInvokeBa
     closeCache(vm1);
 
     // Destroy the current data
-    cleanDiskDirsInEveryVM();
+    invokeInEveryVM("Clean disk dirs", () -> {
+      cleanDiskDirs();
+    });
 
     restoreBackup(2);
 
-    createPersistentRegions();
+    createPersistentRegionsAsync();
 
     checkData(vm0, 0, 2, "A", "region1");
   }
@@ -337,14 +324,16 @@ public void 
testBackupStatusCleanedUpAfterFailureOnOneMember() throws Exception
           
createTestHookToThrowIOExceptionBeforeProcessingPrepareBackupRequest(exceptionMessage));
     });
 
-    createPersistentRegions();
+    createPersistentRegion(vm0);
+    createPersistentRegion(vm1);
 
-    createData();
+    createData(vm0, 0, 5, "A", "region1");
+    createData(vm0, 0, 5, "B", "region2");
 
-    assertThatThrownBy(() -> 
backupMember(vm2)).hasRootCauseInstanceOf(IOException.class);
+    assertThatThrownBy(() -> 
backup(vm2)).hasRootCauseInstanceOf(IOException.class);
 
     // second backup should succeed because the observer and backup state has 
been cleared
-    BackupStatus status = backupMember(vm2);
+    BackupStatus status = backup(vm2);
     assertThat(status.getBackedUpDiskStores()).hasSize(2);
     assertThat(status.getOfflineDiskStores()).isEmpty();
   }
@@ -354,12 +343,13 @@ public void 
testBackupStatusCleanedUpAfterFailureOnOneMember() throws Exception
    */
   @Test
   public void testBackupOverflow() throws Exception {
-    createPersistentRegion(vm0).await();
+    createPersistentRegion(vm0);
     createOverflowRegion(vm1);
 
-    createData();
+    createData(vm0, 0, 5, "A", "region1");
+    createData(vm0, 0, 5, "B", "region2");
 
-    BackupStatus status = backupMember(vm2);
+    BackupStatus status = backup(vm2);
     assertThat(status.getBackedUpDiskStores()).hasSize(1);
     
assertThat(status.getBackedUpDiskStores().values().iterator().next()).hasSize(2);
     assertThat(status.getOfflineDiskStores()).isEmpty();
@@ -369,15 +359,16 @@ public void testBackupOverflow() throws Exception {
 
   @Test
   public void testBackupPRWithOfflineMembers() throws Exception {
-    createPersistentRegion(vm0).await();
-    createPersistentRegion(vm1).await();
-    createPersistentRegion(vm2).await();
+    createPersistentRegion(vm0);
+    createPersistentRegion(vm1);
+    createPersistentRegion(vm2);
 
-    createData();
+    createData(vm0, 0, 5, "A", "region1");
+    createData(vm0, 0, 5, "B", "region2");
 
     closeCache(vm2);
 
-    BackupStatus status = backupMember(vm3);
+    BackupStatus status = backup(vm3);
     assertThat(status.getBackedUpDiskStores()).hasSize(2);
     assertThat(status.getOfflineDiskStores()).hasSize(2);
   }
@@ -386,69 +377,14 @@ private DistributionMessageObserver 
createTestHookToBackup(
       WhenToInvokeBackup backupInvocationTestHook) {
     switch (backupInvocationTestHook) {
       case BEFORE_SENDING_DESTROYREGIONMESSAGE:
-        return createTestHookToBackupBeforeSendingDestroyRegionMessage(() -> 
backupMember(vm2));
+        return createTestHookToBackupBeforeSendingDestroyRegionMessage(() -> 
backup(vm2));
       case BEFORE_PROCESSING_REPLYMESSAGE:
-        return createTestHookToBackupBeforeProcessingReplyMessage(() -> 
backupMember(vm2));
+        return createTestHookToBackupBeforeProcessingReplyMessage(() -> 
backup(vm2));
       default:
         throw new AssertionError("Invalid backupInvocationTestHook " + 
backupInvocationTestHook);
     }
   }
 
-  /**
-   * Test what happens when we restart persistent members while there is an 
accessor concurrently
-   * performing puts.
-   */
-  @Test
-  public void testRecoverySystemWithConcurrentPutter() throws Throwable {
-    createColatedPersistentRegions(vm1).await();
-    createColatedPersistentRegions(vm2).await();
-
-    createAccessor(vm0);
-
-    createData(vm0, 0, NUM_BUCKETS, "a", "region1");
-    createData(vm0, 0, NUM_BUCKETS, "a", "region2");
-
-
-    // backup the system. We use this to get a snapshot of vm1 and vm2
-    // when they both are online. Recovering from this backup simulates
-    // a simulataneous kill and recovery.
-    backupMember(vm3);
-
-    closeCache(vm1);
-    closeCache(vm2);
-
-    cleanDiskDirsInEveryVM();
-    restoreBackup(2);
-
-    // in vm0, start doing a bunch of concurrent puts.
-    AsyncInvocation async0 = vm0.invokeAsync(() -> {
-      Cache cache = getCache();
-      Region region = cache.getRegion("region1");
-      try {
-        for (int i = 0;; i++) {
-          try {
-            region.get(i % NUM_BUCKETS);
-          } catch (PartitionOfflineException | 
PartitionedRegionStorageException expected) {
-            // do nothing.
-          }
-        }
-      } catch (CacheClosedException expected) {
-        // ok, we're done.
-      }
-    });
-
-    AsyncInvocation async1 = createColatedPersistentRegions(vm1);
-    AsyncInvocation async2 = createColatedPersistentRegions(vm2);
-    async1.await();
-    async2.await();
-
-    // close the cache in vm0 to stop the async puts.
-    closeCache(vm0);
-
-    // make sure we didn't get an exception
-    async0.await();
-  }
-
   private DistributionMessageObserver 
createTestHookToBackupBeforeProcessingReplyMessage(
       Runnable task) {
     return new DistributionMessageObserver() {
@@ -492,12 +428,8 @@ public void beforeSendMessage(DistributionManager dm, 
DistributionMessage messag
   }
 
   private void cleanDiskDirsInEveryVM() {
-    workingDirByVm.forEach((vm, file) -> {
-      try {
-        FileUtils.deleteDirectory(file);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
+    invokeInEveryVM("cleanDiskDirsInEveryVM", () -> {
+      cleanDiskDirs();
     });
   }
 
@@ -519,113 +451,151 @@ public void beforeProcessMessage(DistributionManager 
dm, DistributionMessage mes
     };
   }
 
-  private void createPersistentRegions() throws ExecutionException, 
InterruptedException {
-    AsyncInvocation create1 = createPersistentRegion(vm0);
-    AsyncInvocation create2 = createPersistentRegion(vm1);
-    create1.await();
-    create2.await();
+  private void createPersistentRegionsAsync() throws ExecutionException, 
InterruptedException {
+    AsyncInvocation async0 = createPersistentRegionAsync(vm0);
+    AsyncInvocation async1 = createPersistentRegionAsync(vm1);
+    async0.await();
+    async1.await();
   }
 
   private void validateBackupComplete() {
+    File backupDir = getBackupDir();
     Pattern pattern = Pattern.compile(".*INCOMPLETE.*");
-    File[] files = backupBaseDir.listFiles((dir1, name) -> 
pattern.matcher(name).matches());
+    File[] files = backupDir.listFiles((dir1, name) -> 
pattern.matcher(name).matches());
     assertNotNull(files);
     assertTrue(files.length == 0);
   }
 
-  private void deleteOldUserUserFile(final VM vm) {
-    vm.invoke(() -> {
-      File userDir = new File(workingDirByVm.get(vm), "userbackup-");
-      FileUtils.deleteDirectory(userDir);
-    });
-  }
-
-  private long setBackupFiles(final VM vm) {
-    return vm.invoke(() -> {
-      File workingDir = workingDirByVm.get(vm);
-      File test1 = new File(workingDir, "test1");
-      File test2 = new File(test1, "test2");
-      File mytext = new File(test2, "my.txt");
-      final ArrayList<File> backuplist = new ArrayList<>();
-      test2.mkdirs();
-      Files.createFile(mytext.toPath());
-      long lastModified = mytext.lastModified();
-      backuplist.add(test2);
-
-      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
-      cache.setBackupFiles(backuplist);
-
-      return lastModified;
-    });
+  private void createPersistentRegion(VM vm) throws Exception {
+    createPersistentRegionAsync(vm).await();
   }
 
-  private void verifyUserFileRestored(VM vm, final long lm) {
-    vm.invoke(() -> {
-      File workingDir = workingDirByVm.get(vm);
-      File test1 = new File(workingDir, "test1");
-      File test2 = new File(test1, "test2");
-      File mytext = new File(test2, "my.txt");
-      assertTrue(mytext.exists());
-      assertEquals(lm, mytext.lastModified());
-    });
+  private void deleteOldUserUserFile(final VM vm) {
+    SerializableRunnable validateUserFileBackup = new 
SerializableRunnable("set user backups") {
+      @Override
+      public void run() {
+        try {
+          FileUtils.deleteDirectory(new File("userbackup_" + vm.getId()));
+        } catch (IOException e) {
+          fail(e.getMessage());
+        }
+      }
+    };
+    vm.invoke(validateUserFileBackup);
   }
 
-  private AsyncInvocation createPersistentRegion(final VM vm) {
-    return vm.invokeAsync(() -> {
-      Cache cache = getCache();
-      DiskStore diskStore1 = cache.createDiskStoreFactory()
-          .setDiskDirs(getDiskDirs(vm, "vm" + vm.getId() + 
"diskstores_1")).setMaxOplogSize(1)
-          .create(getUniqueName());
+  private long setBackupFiles(final VM vm) {
+    SerializableCallable setUserBackups = new SerializableCallable("set user 
backups") {
+      @Override
+      public Object call() {
+        final int pid = DUnitEnv.get().getPid();
+        File vmdir = new File("userbackup_" + pid);
+        File test1 = new File(vmdir, "test1");
+        File test2 = new File(test1, "test2");
+        File mytext = new File(test2, "my.txt");
+        final ArrayList<File> backuplist = new ArrayList<>();
+        test2.mkdirs();
+        PrintStream ps = null;
+        try {
+          ps = new PrintStream(mytext);
+        } catch (FileNotFoundException e) {
+          fail(e.getMessage());
+        }
+        ps.println(pid);
+        ps.close();
+        mytext.setExecutable(true, true);
+        long lastModified = mytext.lastModified();
+        backuplist.add(test2);
 
-      DiskStore diskStore2 = cache.createDiskStoreFactory()
-          .setDiskDirs(getDiskDirs(vm, "vm" + vm.getId() + 
"diskstores_2")).setMaxOplogSize(1)
-          .create(getUniqueName() + 2);
+        Cache cache = getCache();
+        GemFireCacheImpl gfci = (GemFireCacheImpl) cache;
+        gfci.setBackupFiles(backuplist);
 
-      RegionFactory regionFactory = 
cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
-          .setPartitionAttributes(new 
PartitionAttributesFactory().setRedundantCopies(0).create());
+        return lastModified;
+      }
+    };
+    return (long) vm.invoke(setUserBackups);
+  }
 
-      
regionFactory.setDiskStoreName(diskStore1.getName()).setDiskSynchronous(true)
-          .create("region1");
-      
regionFactory.setDiskStoreName(diskStore2.getName()).setDiskSynchronous(true)
-          .create("region2");
+  private void verifyUserFileRestored(VM vm, final long lm) {
+    vm.invoke(new SerializableRunnable() {
+      @Override
+      public void run() {
+        final int pid = DUnitEnv.get().getPid();
+        File vmdir = new File("userbackup_" + pid);
+        File mytext = new File(vmdir, "test1/test2/my.txt");
+        assertTrue(mytext.exists());
+        if (System.getProperty("java.specification.version").equals("1.6")) {
+          assertTrue(mytext.canExecute());
+        } else {
+          System.out.println(
+              "java.specification.version is " + 
System.getProperty("java.specification.version")
+                  + ", canExecute is" + mytext.canExecute());
+        }
+        assertEquals(lm, mytext.lastModified());
+
+        try {
+          FileReader fr = new FileReader(mytext);
+          BufferedReader bin = new BufferedReader(fr);
+          String content = bin.readLine();
+          assertTrue(content.equals("" + pid));
+        } catch (IOException e) {
+          fail(e.getMessage());
+        }
+      }
     });
   }
 
-  private AsyncInvocation createColatedPersistentRegions(final VM vm) {
-    return vm.invokeAsync(() -> {
-      Cache cache = getCache();
-      DiskStore diskStore1 = cache.createDiskStoreFactory()
-          .setDiskDirs(getDiskDirs(vm, "vm" + vm.getId() + 
"diskstores_1")).setMaxOplogSize(1)
-          .create(getUniqueName());
-
-      DiskStore diskStore2 = cache.createDiskStoreFactory()
-          .setDiskDirs(getDiskDirs(vm, "vm" + vm.getId() + 
"diskstores_2")).setMaxOplogSize(1)
-          .create(getUniqueName() + 2);
-
-      RegionFactory regionFactory = 
cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
-          .setPartitionAttributes(new 
PartitionAttributesFactory().setRedundantCopies(0).create());
-
-      
regionFactory.setDiskStoreName(diskStore1.getName()).setDiskSynchronous(true)
-          .create("region1");
-      
regionFactory.setDiskStoreName(diskStore2.getName()).setDiskSynchronous(true)
-          .setPartitionAttributes(new 
PartitionAttributesFactory().setRedundantCopies(0)
-              .setColocatedWith("region1").create())
-          .create("region2");
-    });
+  private AsyncInvocation createPersistentRegionAsync(final VM vm) {
+    SerializableRunnable createRegion = new SerializableRunnable("Create 
persistent region") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        DiskStoreFactory dsf = cache.createDiskStoreFactory();
+        dsf.setDiskDirs(getDiskDirs(getUniqueName()));
+        dsf.setMaxOplogSize(1);
+        DiskStore ds = dsf.create(getUniqueName());
+
+        RegionFactory rf = new RegionFactory();
+        rf.setDiskStoreName(ds.getName());
+        rf.setDiskSynchronous(true);
+        rf.setDataPolicy(getDataPolicy());
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setRedundantCopies(0);
+        rf.setPartitionAttributes(paf.create());
+        rf.create("region1");
+
+        dsf = cache.createDiskStoreFactory();
+        dsf.setDiskDirs(getDiskDirs(getUniqueName() + 2));
+        dsf.setMaxOplogSize(1);
+        dsf.create(getUniqueName() + 2);
+        rf.setDiskStoreName(getUniqueName() + 2);
+        rf.create("region2");
+      }
+    };
+    return vm.invokeAsync(createRegion);
   }
 
   private void createOverflowRegion(final VM vm) {
-    vm.invoke(() -> {
-      Cache cache = getCache();
-      DiskStore diskStore = cache.createDiskStoreFactory()
-          .setDiskDirs(getDiskDirs(vm, 
getUniqueName())).create(getUniqueName());
-
-      
cache.createRegionFactory(RegionShortcut.REPLICATE).setDiskStoreName(diskStore.getName())
-          .setDiskSynchronous(true)
-          .setEvictionAttributes(
-              EvictionAttributes.createLIFOEntryAttributes(1, 
EvictionAction.OVERFLOW_TO_DISK))
-          .create("region3");
-    });
+    SerializableRunnable createRegion = new SerializableRunnable("Create 
persistent region") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        DiskStoreFactory dsf = cache.createDiskStoreFactory();
+        dsf.setDiskDirs(getDiskDirs(getUniqueName()));
+        dsf.setMaxOplogSize(1);
+        DiskStore ds = dsf.create(getUniqueName());
+
+        RegionFactory rf = new RegionFactory();
+        rf.setDiskStoreName(ds.getName());
+        rf.setDiskSynchronous(true);
+        rf.setDataPolicy(DataPolicy.REPLICATE);
+        rf.setEvictionAttributes(
+            EvictionAttributes.createLIFOEntryAttributes(1, 
EvictionAction.OVERFLOW_TO_DISK));
+        rf.create("region3");
+      }
+    };
+    vm.invoke(createRegion);
   }
 
   @Override
@@ -636,14 +606,19 @@ protected void createData(VM vm, final int startKey, 
final int endKey, final Str
   @Override
   protected void createData(VM vm, final int startKey, final int endKey, final 
String value,
       final String regionName) {
-    vm.invoke(() -> {
-      Cache cache = getCache();
-      Region region = cache.getRegion(regionName);
+    SerializableRunnable createData = new SerializableRunnable() {
+
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(regionName);
 
-      for (int i = startKey; i < endKey; i++) {
-        region.put(i, value);
+        for (int i = startKey; i < endKey; i++) {
+          region.put(i, value);
+        }
       }
-    });
+    };
+    vm.invoke(createData);
   }
 
   @Override
@@ -654,18 +629,32 @@ protected void checkData(VM vm, final int startKey, final 
int endKey, final Stri
   @Override
   protected void checkData(VM vm, final int startKey, final int endKey, final 
String value,
       final String regionName) {
-    vm.invoke(() -> {
-      Region region = getCache().getRegion(regionName);
+    SerializableRunnable checkData = new SerializableRunnable() {
 
-      for (int i = startKey; i < endKey; i++) {
-        assertEquals(value, region.get(i));
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(regionName);
+
+        for (int i = startKey; i < endKey; i++) {
+          assertEquals(value, region.get(i));
+        }
       }
-    });
+    };
+
+    vm.invoke(checkData);
   }
 
   @Override
   protected void closeCache(final VM vm) {
-    vm.invoke(() -> getCache().close());
+    SerializableRunnable closeCache = new SerializableRunnable("close cache") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        cache.close();
+      }
+    };
+    vm.invoke(closeCache);
   }
 
   @Override
@@ -675,70 +664,76 @@ protected void closeCache(final VM vm) {
 
   @Override
   protected Set<Integer> getBucketList(VM vm, final String regionName) {
-    return vm.invoke(() -> {
-      Cache cache = getCache();
-      PartitionedRegion region = (PartitionedRegion) 
cache.getRegion(regionName);
-      return new TreeSet<>(region.getDataStore().getAllLocalBucketIds());
-    });
+    SerializableCallable getBuckets = new SerializableCallable("get buckets") {
+
+      @Override
+      public Object call() throws Exception {
+        Cache cache = getCache();
+        PartitionedRegion region = (PartitionedRegion) 
cache.getRegion(regionName);
+        return new TreeSet<>(region.getDataStore().getAllLocalBucketIds());
+      }
+    };
+
+    return (Set<Integer>) vm.invoke(getBuckets);
   }
 
-  private File[] getDiskDirs(VM vm, String dsName) {
+  private File[] getDiskDirs(String dsName) {
+    File[] dirs = getDiskDirs();
     File[] diskStoreDirs = new File[1];
-    diskStoreDirs[0] = new File(workingDirByVm.get(vm), dsName);
+    diskStoreDirs[0] = new File(dirs[0], dsName);
     diskStoreDirs[0].mkdirs();
     return diskStoreDirs;
   }
 
-  private BackupStatus backupMember(final VM vm) {
-    return vm.invoke("backup", () -> {
-      try {
-        return 
BackupUtil.backupAllMembers(getSystem().getDistributionManager(), backupBaseDir,
-            null);
-      } catch (ManagementException e) {
-        throw new RuntimeException(e);
+  private DataPolicy getDataPolicy() {
+    return DataPolicy.PERSISTENT_PARTITION;
+  }
+
+  void checkRecoveredFromDisk(VM vm, final int bucketId, final boolean 
recoveredLocally) {
+    vm.invoke(new SerializableRunnable("check recovered from disk") {
+      @Override
+      public void run() {
+        Cache cache = getCache();
+        PartitionedRegion region = (PartitionedRegion) 
cache.getRegion(getPartitionedRegionName());
+        DiskRegion disk = 
region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
+        if (recoveredLocally) {
+          assertEquals(0, disk.getStats().getRemoteInitializations());
+          assertEquals(1, disk.getStats().getLocalInitializations());
+        } else {
+          assertEquals(1, disk.getStats().getRemoteInitializations());
+          assertEquals(0, disk.getStats().getLocalInitializations());
+        }
       }
     });
   }
 
-  protected void restoreBackup(final int expectedNumScripts)
-      throws IOException, InterruptedException {
-    Collection<File> restoreScripts =
-        listFiles(backupBaseDir, new RegexFileFilter(".*restore.*"), 
DIRECTORY);
-    assertThat(restoreScripts).hasSize(expectedNumScripts);
-    for (File script : restoreScripts) {
-      execute(script);
+  /**
+   * Recursively delete a file or directory. A description of any files or 
directories that can not
+   * be deleted will be added to failures if failures is non-null. This method 
tries to delete as
+   * much as possible.
+   */
+  public static void delete(File file, StringBuilder failures) {
+    if (!file.exists()) {
+      return;
     }
-  }
 
-  private void execute(final File script) throws IOException, 
InterruptedException {
-    ProcessBuilder processBuilder = new 
ProcessBuilder(script.getAbsolutePath());
-    processBuilder.redirectErrorStream(true);
-    Process process = processBuilder.start();
-
-    try (BufferedReader reader =
-        new BufferedReader(new InputStreamReader(process.getInputStream()))) {
-      String line;
-      while ((line = reader.readLine()) != null) {
-        logger.info("OUTPUT:" + line);
+    if (file.isDirectory()) {
+      File[] fileList = file.listFiles();
+      if (fileList != null) {
+        for (File child : fileList) {
+          delete(child, failures);
+        }
       }
     }
 
-    assertThat(process.waitFor()).isEqualTo(0);
-  }
-
-  private void createAccessor(VM vm) {
-    vm.invoke(() -> {
-      Cache cache = getCache();
-
-      cache.createRegionFactory(RegionShortcut.PARTITION)
-          .setPartitionAttributes(
-              new 
PartitionAttributesFactory().setRedundantCopies(0).setLocalMaxMemory(0).create())
-          .create("region1");
-      cache.createRegionFactory(RegionShortcut.PARTITION)
-          .setPartitionAttributes(new 
PartitionAttributesFactory().setColocatedWith("region1")
-              .setRedundantCopies(0).setLocalMaxMemory(0).create())
-          .create("region2");
-    });
+    try {
+      Files.delete(file.toPath());
+    } catch (IOException e) {
+      if (failures != null) {
+        failures.append("Could not delete ").append(file).append(" due to 
").append(e.getMessage())
+            .append('\n');
+      }
+    }
   }
 
   enum WhenToInvokeBackup {
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
index 78ab872bfd..ee587380eb 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
@@ -319,7 +319,7 @@ public void testBackupCacheXml() throws Exception {
     backup.prepareForBackup();
     backup.doBackup(backupDir, null, false);
     Collection<File> fileCollection = FileUtils.listFiles(backupDir,
-        new RegexFileFilter("BackupIntegrationTest.cache.xml"), 
DirectoryFileFilter.DIRECTORY);
+        new RegexFileFilter("cache.xml"), DirectoryFileFilter.DIRECTORY);
     assertEquals(1, fileCollection.size());
     File cacheXmlBackup = fileCollection.iterator().next();
     assertTrue(cacheXmlBackup.exists());
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
index 1b30f749e7..f48f7f355a 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
@@ -236,4 +236,5 @@ public void beforeWaitForBackupCompletion() {
     }
     return regionFactory.create(TEST_REGION_NAME);
   }
+
 }
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupReplyProcessorTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupReplyProcessorTest.java
index a32f78a261..e01485285c 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupReplyProcessorTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupReplyProcessorTest.java
@@ -14,12 +14,8 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
 
 import java.util.HashSet;
 import java.util.Set;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestinationTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestinationTest.java
deleted file mode 100644
index 752d88cb41..0000000000
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestinationTest.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
- * or implied. See the License for the specific language governing permissions 
and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.backup;
-
-import static 
org.apache.geode.internal.cache.backup.BackupDestination.CONFIG_DIRECTORY;
-import static 
org.apache.geode.internal.cache.backup.BackupDestination.DATA_STORES_DIRECTORY;
-import static 
org.apache.geode.internal.cache.backup.BackupDestination.DEPLOYED_JARS_DIRECTORY;
-import static 
org.apache.geode.internal.cache.backup.BackupDestination.README_FILE;
-import static 
org.apache.geode.internal.cache.backup.BackupDestination.USER_FILES_DIRECTORY;
-import static 
org.apache.geode.internal.cache.backup.FileSystemBackupDestination.INCOMPLETE_BACKUP_FILE;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TemporaryFolder;
-
-import org.apache.geode.internal.cache.DiskStoreImpl;
-import org.apache.geode.internal.cache.GemFireCacheImpl;
-import org.apache.geode.internal.cache.Oplog;
-import org.apache.geode.internal.cache.persistence.DiskStoreID;
-import org.apache.geode.test.junit.categories.IntegrationTest;
-
-@Category(IntegrationTest.class)
-public class FileSystemBackupDestinationTest {
-
-  @Rule
-  public TemporaryFolder tempDir = new TemporaryFolder();
-
-  private BackupDefinition backupDefinition;
-  private Path targetDir;
-  private RestoreScript restoreScript;
-
-  @Before
-  public void setup() throws IOException {
-    backupDefinition = new BackupDefinition();
-    Path backupDirectory = tempDir.newFolder("backups").toPath();
-    targetDir = backupDirectory.resolve("backupTarget");
-    restoreScript = mock(RestoreScript.class);
-    when(restoreScript.generate(any())).thenReturn(tempDir.newFile());
-  }
-
-  @Test
-  public void userFilesAreBackedUp() throws Exception {
-    Path userFile = tempDir.newFile("userFile").toPath();
-    Path userSubdir = tempDir.newFolder("userSubDir").toPath();
-    Path userFileInDir = Files.write(userSubdir.resolve("fileInDir"), new 
byte[] {});
-    backupDefinition.addUserFilesToBackup(userFile);
-    backupDefinition.addUserFilesToBackup(userSubdir);
-
-    executeBackup();
-
-    Path userDir = targetDir.resolve(USER_FILES_DIRECTORY);
-    assertThat(userDir.resolve(userFile.getFileName())).exists();
-    assertThat(userDir.resolve(userSubdir.getFileName())).exists();
-    
assertThat(userDir.resolve(userSubdir.getFileName()).resolve(userFileInDir.getFileName()))
-        .exists();
-  }
-
-  @Test
-  public void deployedJarsAreBackedUp() throws Exception {
-    Path jarFile = tempDir.newFile("jarFile").toPath();
-    Path jarSubdir = tempDir.newFolder("jarSubdir").toPath();
-    Path jarInSubdir = Files.write(jarSubdir.resolve("jarInSubdir"), new 
byte[] {});
-    backupDefinition.addDeployedJarToBackup(jarFile);
-    backupDefinition.addDeployedJarToBackup(jarSubdir);
-
-    executeBackup();
-
-    Path userDir = targetDir.resolve(DEPLOYED_JARS_DIRECTORY);
-    assertThat(userDir.resolve(jarFile.getFileName())).exists();
-    assertThat(userDir.resolve(jarSubdir.getFileName())).exists();
-    
assertThat(userDir.resolve(jarSubdir.getFileName()).resolve(jarInSubdir.getFileName()))
-        .exists();
-  }
-
-  @Test
-  public void configFilesAreBackedUp() throws Exception {
-    Path cacheXml = tempDir.newFile("cache.xml").toPath();
-    Path propertyFile = tempDir.newFile("properties").toPath();
-    backupDefinition.addConfigFileToBackup(cacheXml);
-    backupDefinition.addConfigFileToBackup(propertyFile);
-
-    executeBackup();
-
-    Path configDir = targetDir.resolve(CONFIG_DIRECTORY);
-    assertThat(configDir.resolve(cacheXml.getFileName())).exists();
-    assertThat(configDir.resolve(propertyFile.getFileName())).exists();
-  }
-
-  @Test
-  public void oplogFilesAreBackedUp() throws Exception {
-    DiskStoreImpl diskStore = mock(DiskStoreImpl.class);
-    when(diskStore.getDiskStoreID()).thenReturn(new DiskStoreID(1, 2));
-    Oplog oplog = mock(Oplog.class);
-    when(oplog.getCrfFile()).thenReturn(tempDir.newFile("crf"));
-    when(oplog.getDrfFile()).thenReturn(tempDir.newFile("drf"));
-    when(oplog.getKrfFile()).thenReturn(tempDir.newFile("krf"));
-    when(diskStore.getInforFileDirIndex()).thenReturn(1);
-
-    backupDefinition.addOplogFileToBackup(diskStore, 
oplog.getCrfFile().toPath());
-    backupDefinition.addOplogFileToBackup(diskStore, 
oplog.getDrfFile().toPath());
-    backupDefinition.addOplogFileToBackup(diskStore, 
oplog.getKrfFile().toPath());
-
-    executeBackup();
-
-    Path diskStoreDir = targetDir.resolve(DATA_STORES_DIRECTORY)
-        .resolve(GemFireCacheImpl.getDefaultDiskStoreName() + "_1-2");
-    assertThat(diskStoreDir.resolve("dir1").resolve("crf")).exists();
-    assertThat(diskStoreDir.resolve("dir1").resolve("drf")).exists();
-    assertThat(diskStoreDir.resolve("dir1").resolve("krf")).exists();
-  }
-
-  @Test
-  public void diskInitFilesAreBackedUp() throws Exception {
-    DiskStoreImpl diskStore1 = mock(DiskStoreImpl.class);
-    when(diskStore1.getDiskStoreID()).thenReturn(new DiskStoreID(1, 2));
-    when(diskStore1.getInforFileDirIndex()).thenReturn(1);
-    DiskStoreImpl diskStore2 = mock(DiskStoreImpl.class);
-    when(diskStore2.getDiskStoreID()).thenReturn(new DiskStoreID(1, 2));
-    when(diskStore2.getInforFileDirIndex()).thenReturn(2);
-    Path initFile1 = tempDir.newFolder("dir1").toPath().resolve("initFile1");
-    Path initFile2 = tempDir.newFolder("dir2").toPath().resolve("initFile2");
-    Files.createFile(initFile1);
-    Files.createFile(initFile2);
-    backupDefinition.addDiskInitFile(diskStore1, initFile1);
-    backupDefinition.addDiskInitFile(diskStore2, initFile2);
-
-    executeBackup();
-
-    Path diskStoreDir = targetDir.resolve(DATA_STORES_DIRECTORY)
-        .resolve(GemFireCacheImpl.getDefaultDiskStoreName() + "_1-2");
-    assertThat(diskStoreDir.resolve("dir1").resolve("initFile1")).exists();
-    assertThat(diskStoreDir.resolve("dir2").resolve("initFile2")).exists();
-  }
-
-  @Test
-  public void restoreScriptIsBackedUp() throws Exception {
-    Path restoreScriptPath = tempDir.newFile("restoreScript").toPath();
-    when(restoreScript.generate(any())).thenReturn(restoreScriptPath.toFile());
-    backupDefinition.setRestoreScript(restoreScript);
-
-    executeBackup();
-
-    assertThat(targetDir.resolve("restoreScript")).exists();
-  }
-
-  @Test
-  public void backupContainsReadMe() throws IOException {
-    executeBackup();
-
-    assertThat(targetDir.resolve(README_FILE)).exists();
-  }
-
-  @Test
-  public void leavesBehindIncompleteFileOnFailure() throws Exception {
-    Path notCreatedFile = 
tempDir.newFolder("dir1").toPath().resolve("notCreated");
-    backupDefinition.addDeployedJarToBackup(notCreatedFile);
-
-    try {
-      executeBackup();
-    } catch (IOException ignore) {
-      // expected to occur on missing file
-    }
-
-    assertThat(targetDir.resolve(INCOMPLETE_BACKUP_FILE)).exists();
-  }
-
-  @Test
-  public void doesNotLeaveBehindIncompleteFileOnSuccess() throws Exception {
-    executeBackup();
-    assertThat(targetDir.resolve(INCOMPLETE_BACKUP_FILE)).doesNotExist();
-  }
-
-  private void executeBackup() throws IOException {
-    BackupDestination backupDestination = new 
FileSystemBackupDestination(targetDir);
-    backupDestination.backupFiles(backupDefinition);
-  }
-}
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupFactoryTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupFactoryTest.java
index b91ec19980..ce2e7f472b 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupFactoryTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupFactoryTest.java
@@ -14,9 +14,8 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
 
 import java.io.File;
 import java.util.HashSet;
@@ -27,6 +26,7 @@
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.CancelCriterion;
+import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import 
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupOperationTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupOperationTest.java
index dc68f35b59..c0972e8112 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupOperationTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupOperationTest.java
@@ -14,15 +14,8 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
 
 import java.io.File;
 import java.io.IOException;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskFactoryTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskFactoryTest.java
index e3245c06c7..cfb62ab8cf 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskFactoryTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskFactoryTest.java
@@ -14,10 +14,10 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
 
+import java.io.File;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -26,6 +26,7 @@
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.CancelCriterion;
+import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.distributed.internal.DM;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import 
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskOperationTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskOperationTest.java
index 7e4364930e..a79b43f7ef 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskOperationTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskOperationTest.java
@@ -14,13 +14,7 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.*;
 
 import java.util.HashSet;
 import java.util.Set;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
index 762920c9e7..113bb70d50 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
@@ -14,12 +14,8 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
 
 import java.util.HashSet;
 import java.util.Set;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
index d71bb85f49..69b6478919 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
@@ -27,7 +27,6 @@
 import java.io.InputStreamReader;
 import java.nio.file.Files;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -38,8 +37,6 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.filefilter.DirectoryFileFilter;
 import org.apache.commons.io.filefilter.RegexFileFilter;
-import org.apache.logging.log4j.Logger;
-import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -48,8 +45,6 @@
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.DiskStore;
-import org.apache.geode.cache.PartitionAttributes;
-import org.apache.geode.cache.PartitionAttributesFactory;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionFactory;
 import org.apache.geode.cache.RegionShortcut;
@@ -61,7 +56,7 @@
 import org.apache.geode.internal.DeployedJar;
 import org.apache.geode.internal.cache.DiskStoreImpl;
 import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.internal.logging.LogService;
+import org.apache.geode.internal.util.IOUtils;
 import org.apache.geode.internal.util.TransformUtils;
 import org.apache.geode.management.BackupStatus;
 import org.apache.geode.management.ManagementException;
@@ -75,7 +70,6 @@
 import org.apache.geode.test.dunit.WaitCriterion;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
-import 
org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
 
 /**
  * Tests for the incremental backup feature.
@@ -83,9 +77,6 @@
 @Category(DistributedTest.class)
 @SuppressWarnings("serial")
 public class IncrementalBackupDistributedTest extends JUnit4CacheTestCase {
-
-  private static final Logger logger = LogService.getLogger();
-
   /**
    * Data load increment.
    */
@@ -106,37 +97,22 @@
    */
   private static final String OPLOG_REGEX = ".*\\.[kdc]rf$";
 
-  @Rule
-  public SerializableTemporaryFolder tempDir = new 
SerializableTemporaryFolder();
-
-  private final Map<Integer, File> baseDirectoryByVm = new HashMap<>();
-
   /**
    * Creates test regions for a member.
    */
-  private void createRegions(File baseDirectory, int vmNumber) throws 
IOException {
-    Cache cache = getCache(new CacheFactory().set(LOG_LEVEL, 
LogWriterUtils.getDUnitLogLevel()));
-    cache.createDiskStoreFactory().setDiskDirs(getDiskDirectory(baseDirectory, 
vmNumber))
-        .create("fooStore");
-    cache.createDiskStoreFactory().setDiskDirs(getDiskDirectory(baseDirectory, 
vmNumber))
-        .create("barStore");
-    getRegionFactory(cache).setDiskStoreName("fooStore").create("fooRegion");
-    getRegionFactory(cache).setDiskStoreName("barStore").create("barRegion");
-  }
-
-  private File[] getDiskDirectory(File parent, int vmNumber) throws 
IOException {
-    File dir = new File(parent, "disk" + 
String.valueOf(vmNumber)).getAbsoluteFile();
-    dir.mkdirs();
-    return new File[] {dir};
-  }
+  private final SerializableRunnable createRegions = new 
SerializableRunnable() {
+    @Override
+    public void run() {
+      Cache cache = getCache(new CacheFactory().set(LOG_LEVEL, 
LogWriterUtils.getDUnitLogLevel()));
+      
cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("fooStore");
+      
cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("barStore");
+      getRegionFactory(cache).setDiskStoreName("fooStore").create("fooRegion");
+      getRegionFactory(cache).setDiskStoreName("barStore").create("barRegion");
+    }
+  };
 
   private RegionFactory<Integer, String> getRegionFactory(Cache cache) {
-    PartitionAttributes<Integer, String> attributes =
-        new PartitionAttributesFactory<Integer, 
String>().setTotalNumBuckets(5).create();
-    RegionFactory<Integer, String> factory =
-        cache.<Integer, 
String>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
-            .setPartitionAttributes(attributes);
-    return factory;
+    return cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT);
   }
 
   /**
@@ -147,6 +123,15 @@ private void createRegions(File baseDirectory, int 
vmNumber) throws IOException
     return file.isDirectory() && file.getName().startsWith("20");
   };
 
+  /**
+   * Abstracts the logging mechanism.
+   *
+   * @param message a message to log.
+   */
+  private void log(String message) {
+    LogWriterUtils.getLogWriter().info("[IncrementalBackupDistributedTest] " + 
message);
+  }
+
   /**
    * @return the baseline backup directory.
    */
@@ -186,6 +171,21 @@ private static File getIncrementalDir() {
     return dir;
   }
 
+  /**
+   * Returns the directory for a given member.
+   *
+   * @param vm a distributed system member.
+   * @return the disk directories for a member.
+   */
+  private File getVMDir(VM vm) {
+    return (File) vm.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        return IOUtils.tryGetCanonicalFileElseGetAbsoluteFile(new 
File(getDiskDirs()[0], "../.."));
+      }
+    });
+  }
+
   /**
    * Invokes {@link AdminDistributedSystem#getMissingPersistentMembers()} on a 
member.
    *
@@ -210,12 +210,16 @@ public Object call() {
    * @return the status of the backup.
    */
   private BackupStatus baseline(VM vm) {
-    return vm.invoke(() -> {
-      try {
-        return 
BackupUtil.backupAllMembers(getSystem().getDistributionManager(), 
getBaselineDir(),
-            null);
-      } catch (ManagementException e) {
-        throw new RuntimeException(e);
+    return (BackupStatus) vm.invoke(new SerializableCallable("Backup all 
members.") {
+      @Override
+      public Object call() {
+        try {
+          return 
BackupUtil.backupAllMembers(getSystem().getDistributionManager(), 
getBaselineDir(),
+              null);
+
+        } catch (ManagementException e) {
+          throw new RuntimeException(e);
+        }
       }
     });
   }
@@ -227,12 +231,16 @@ private BackupStatus baseline(VM vm) {
    * @return a status of the backup operation.
    */
   private BackupStatus incremental(VM vm) {
-    return vm.invoke(() -> {
-      try {
-        return 
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
-            getIncrementalDir(), getBaselineBackupDir());
-      } catch (ManagementException e) {
-        throw new RuntimeException(e);
+    return (BackupStatus) vm.invoke(new SerializableCallable("Backup all 
members.") {
+      @Override
+      public Object call() {
+        try {
+          return 
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
+              getIncrementalDir(), getBaselineBackupDir());
+
+        } catch (ManagementException e) {
+          throw new RuntimeException(e);
+        }
       }
     });
   }
@@ -244,12 +252,16 @@ private BackupStatus incremental(VM vm) {
    * @return a status of the backup operation.
    */
   private BackupStatus incremental2(VM vm) {
-    return vm.invoke(() -> {
-      try {
-        return 
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
-            getIncremental2Dir(), getIncrementalBackupDir());
-      } catch (ManagementException e) {
-        throw new RuntimeException(e);
+    return (BackupStatus) vm.invoke(new SerializableCallable("Backup all 
members.") {
+      @Override
+      public Object call() {
+        try {
+          return 
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
+              getIncremental2Dir(), getIncrementalBackupDir());
+
+        } catch (ManagementException e) {
+          throw new RuntimeException(e);
+        }
       }
     });
   }
@@ -261,8 +273,13 @@ private BackupStatus incremental2(VM vm) {
    * @return the member's id.
    */
   private String getMemberId(VM vm) {
-    return vm.invoke(() -> 
getCache().getDistributedSystem().getDistributedMember().toString()
-        .replaceAll("[^\\w]+", "_"));
+    return (String) vm.invoke(new SerializableCallable("getMemberId") {
+      @Override
+      public Object call() throws Exception {
+        return 
getCache().getDistributedSystem().getDistributedMember().toString()
+            .replaceAll("[^\\w]+", "_");
+      }
+    });
   }
 
   /**
@@ -317,7 +334,7 @@ private PersistentID getPersistentID(final VM vm) {
   private PersistentID disconnect(final VM disconnectVM, final VM testVM) {
     final PersistentID id = disconnectVM.invoke(() -> {
       PersistentID persistentID = null;
-      Collection<DiskStore> diskStores = getCache().listDiskStores();
+      Collection<DiskStore> diskStores = ((InternalCache) 
getCache()).listDiskStores();
       for (DiskStore diskStore : diskStores) {
         if (diskStore.getName().equals("fooStore")) {
           persistentID = ((DiskStoreImpl) diskStore).getPersistentID();
@@ -354,19 +371,8 @@ public String description() {
    *
    * @param vm a member of the distributed system.
    */
-  private void openCache(VM vm) throws IOException {
-    int vmNumber = vm.getId();
-    File vmDir = getBaseDir(vmNumber);
-    vm.invoke(() -> createRegions(vmDir, vmNumber));
-  }
-
-  private File getBaseDir(int vmNumber) throws IOException {
-    File baseDir = baseDirectoryByVm.get(vmNumber);
-    if (baseDir == null) {
-      baseDir = tempDir.newFolder("vm" + vmNumber);
-      baseDirectoryByVm.put(vmNumber, baseDir);
-    }
-    return baseDir;
+  private void openCache(VM vm) {
+    vm.invoke(this.createRegions);
   }
 
   /**
@@ -436,6 +442,15 @@ private static File getIncrementalBackupDir() {
     return dirs[0];
   }
 
+  /**
+   * @return the directory for the completed baseline backup.
+   */
+  private static File getIncremental2BackupDir() {
+    File[] dirs = getIncremental2Dir().listFiles(backupDirFilter);
+    assertEquals(1, dirs.length);
+    return dirs[0];
+  }
+
   /**
    * Returns an individual member's backup directory.
    *
@@ -463,7 +478,7 @@ public boolean accept(File file) {
    * Adds the data region to every participating VM.
    */
   @SuppressWarnings("serial")
-  private void createDataRegions() throws IOException {
+  private void createDataRegions() {
     Host host = Host.getHost(0);
     int numberOfVms = host.getVMCount();
 
@@ -497,11 +512,12 @@ public void run() {
 
           do {
             line = reader.readLine();
+            log(line);
           } while (null != line);
 
           reader.close();
         } catch (IOException e) {
-          logger.info(e);
+          log("Execute: error while reading standard in: " + e.getMessage());
         }
       }
     }).start();
@@ -523,7 +539,7 @@ public void run() {
 
           reader.close();
         } catch (IOException e) {
-          logger.info(e);
+          log("Execute: error while reading standard error: " + 
e.getMessage());
         }
       }
     }).start();
@@ -628,9 +644,10 @@ private void assertBackupStatus(final BackupStatus 
backupStatus) {
   @Override
   public final void postSetUp() throws Exception {
     createDataRegions();
-    File dir = getBaseDir(-1);
-    createRegions(dir, -1);
+    this.createRegions.run();
     loadMoreData();
+
+    log("Data region created and populated.");
   }
 
   /**
@@ -653,8 +670,7 @@ public final void preTearDownCacheTestCase() throws 
Exception {
   public void testIncrementalBackup() throws Exception {
     String memberId = getMemberId(Host.getHost(0).getVM(1));
 
-    File memberDir = baseDirectoryByVm.get(1);
-    // getVMDir(Host.getHost(0).getVM(1));
+    File memberDir = getVMDir(Host.getHost(0).getVM(1));
     assertNotNull(memberDir);
 
     // Find all of the member's oplogs in the disk directory 
(*.crf,*.krf,*.drf)
@@ -691,6 +707,9 @@ public void testIncrementalBackup() throws Exception {
     TransformUtils.transform(memberIncrementalOplogs, 
memberIncrementalOplogNames,
         TransformUtils.fileNameTransformer);
 
+    log("BASELINE OPLOGS = " + memberBaselineOplogNames);
+    log("INCREMENTAL OPLOGS = " + memberIncrementalOplogNames);
+
     /*
      * Assert that the incremental backup does not contain baseline operation 
logs that the member
      * still has copies of.
@@ -713,6 +732,8 @@ public void testIncrementalBackup() throws Exception {
     TransformUtils.transform(memberIncremental2Oplogs, 
memberIncremental2OplogNames,
         TransformUtils.fileNameTransformer);
 
+    log("INCREMENTAL 2 OPLOGS = " + memberIncremental2OplogNames);
+
     /*
      * Assert that the second incremental backup does not contain operation 
logs copied into the
      * baseline.
@@ -1068,12 +1089,16 @@ public void testBackupUserDeployedJarFiles() throws 
Exception {
     /*
      * Remove the "dummy" jar from the VM.
      */
-    vm0.invoke(() -> {
-      for (DeployedJar jarClassLoader : 
ClassPathLoader.getLatest().getJarDeployer()
-          .findDeployedJars()) {
-        if (jarClassLoader.getJarName().startsWith(jarName)) {
-          
ClassPathLoader.getLatest().getJarDeployer().undeploy(jarClassLoader.getJarName());
+    vm0.invoke(new SerializableCallable() {
+      @Override
+      public Object call() throws Exception {
+        for (DeployedJar jarClassLoader : 
ClassPathLoader.getLatest().getJarDeployer()
+            .findDeployedJars()) {
+          if (jarClassLoader.getJarName().startsWith(jarName)) {
+            
ClassPathLoader.getLatest().getJarDeployer().undeploy(jarClassLoader.getJarName());
+          }
         }
+        return null;
       }
     });
 
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
index 3cf89cd709..41431a35b2 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
@@ -14,9 +14,8 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
 
 import java.util.HashSet;
 import java.util.Set;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
index 71dd0c136c..2e4246aeba 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
@@ -14,15 +14,8 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
 
 import java.io.IOException;
 import java.util.HashSet;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
index dd0e643c8f..f1700bc185 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
@@ -14,12 +14,8 @@
  */
 package org.apache.geode.internal.cache.backup;
 
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assertions.*;
+import static org.mockito.Mockito.*;
 
 import java.io.IOException;
 import java.util.HashSet;
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
index 7a60eaf0f1..d7c7063256 100644
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
@@ -2138,6 +2138,144 @@ public Object call() throws Exception {
     checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
   }
 
+  /**
+   * Test what happens when we restart persistent members while there is an 
accessor concurrently
+   * performing puts. This is for bug 43899
+   */
+  @Test
+  public void testRecoverySystemWithConcurrentPutter() throws Throwable {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+    VM vm3 = host.getVM(3);
+
+    // Define all of the runnables used in this test
+
+    // runnable to create accessors
+    SerializableRunnable createAccessor = new 
SerializableRunnable("createAccessor") {
+      public void run() {
+        Cache cache = getCache();
+
+        AttributesFactory af = new AttributesFactory();
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setRedundantCopies(1);
+        paf.setLocalMaxMemory(0);
+        af.setPartitionAttributes(paf.create());
+        af.setDataPolicy(DataPolicy.PARTITION);
+        cache.createRegion(getPartitionedRegionName(), af.create());
+
+        paf.setColocatedWith(getPartitionedRegionName());
+        af.setPartitionAttributes(paf.create());
+        cache.createRegion("region2", af.create());
+      }
+    };
+
+    // runnable to create PRs
+    SerializableRunnable createPRs = new SerializableRunnable("createPRs") {
+      public void run() {
+        Cache cache = getCache();
+
+        DiskStore ds = cache.findDiskStore("disk");
+        if (ds == null) {
+          ds = 
cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
+        }
+        AttributesFactory af = new AttributesFactory();
+        PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setRedundantCopies(1);
+        af.setPartitionAttributes(paf.create());
+        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+        af.setDiskStoreName("disk");
+        cache.createRegion(getPartitionedRegionName(), af.create());
+
+        paf.setColocatedWith(getPartitionedRegionName());
+        af.setPartitionAttributes(paf.create());
+        cache.createRegion("region2", af.create());
+      }
+    };
+
+    // runnable to close the cache.
+    SerializableRunnable closeCache = new SerializableRunnable("closeCache") {
+      public void run() {
+        closeCache();
+      }
+    };
+
+    // Runnable to do a bunch of puts handle exceptions
+    // due to the fact that member is offline.
+    SerializableRunnable doABunchOfPuts = new 
SerializableRunnable("doABunchOfPuts") {
+      public void run() {
+        Cache cache = getCache();
+        Region region = cache.getRegion(getPartitionedRegionName());
+        try {
+          for (int i = 0;; i++) {
+            try {
+              region.get(i % NUM_BUCKETS);
+            } catch (PartitionOfflineException expected) {
+              // do nothing.
+            } catch (PartitionedRegionStorageException expected) {
+              // do nothing.
+            }
+            Thread.yield();
+          }
+        } catch (CacheClosedException expected) {
+          // ok, we're done.
+        }
+      }
+    };
+
+
+    // Runnable to clean up disk dirs on a members
+    SerializableRunnable cleanDiskDirs = new SerializableRunnable("Clean disk 
dirs") {
+      public void run() {
+        cleanDiskDirs();
+      }
+    };
+
+    // Create the PR two members
+    vm1.invoke(createPRs);
+    vm2.invoke(createPRs);
+
+    // create the accessor.
+    vm0.invoke(createAccessor);
+
+
+    // Create some buckets.
+    createData(vm0, 0, NUM_BUCKETS, "a");
+    createData(vm0, 0, NUM_BUCKETS, "a", "region2");
+
+
+    // backup the system. We use this to get a snapshot of vm1 and vm2
+    // when they both are online. Recovering from this backup simulates
+    // a simultaneous kill and recovery.
+    backup(vm3);
+
+    // close vm1 and vm2.
+    vm1.invoke(closeCache);
+    vm2.invoke(closeCache);
+
+    // restore the backup
+    vm1.invoke(cleanDiskDirs);
+    vm2.invoke(cleanDiskDirs);
+    restoreBackup(2);
+
+    // in vm0, start doing a bunch of concurrent puts.
+    AsyncInvocation async0 = vm0.invokeAsync(doABunchOfPuts);
+
+    // This recovery should not hang (that's what we're testing for
+    // here.
+    AsyncInvocation async1 = vm1.invokeAsync(createPRs);
+    AsyncInvocation async2 = vm2.invokeAsync(createPRs);
+    async1.getResult(MAX_WAIT);
+    async2.getResult(MAX_WAIT);
+
+    // close the cache in vm0 to stop the async puts.
+    vm0.invoke(closeCache);
+
+    // make sure we didn't get an exception
+    async0.getResult(MAX_WAIT);
+  }
+
   @Category(FlakyTest.class) // GEODE-506: time sensitive, async actions with 
30 sec max
   @Test
   public void testRebalanceWithOfflineChildRegion() throws Throwable {


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


> Create plugin system for specifying where a backup is stored
> ------------------------------------------------------------
>
>                 Key: GEODE-3799
>                 URL: https://issues.apache.org/jira/browse/GEODE-3799
>             Project: Geode
>          Issue Type: Sub-task
>          Components: persistence
>            Reporter: Nick Reich
>            Assignee: Nick Reich
>             Fix For: 1.4.0
>
>
> The current logic merges the moving/copying of files with the determining of 
> what to backup. To make it possible to store a backup in cloud storage or 
> other locations, we need to separate these concerns, putting the variable, 
> location-based logic, into a plugin architecture.



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

Reply via email to