[
https://issues.apache.org/jira/browse/GEODE-3799?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16291747#comment-16291747
]
ASF GitHub Bot commented on GEODE-3799:
---------------------------------------
nreich closed pull request #1109: GEODE-3799: Move backups towards a pluggable
architecture
URL: https://github.com/apache/geode/pull/1109
This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:
As this is a foreign pull request (opened from a fork), the diff is reproduced
below, since GitHub does not display the original diff after the merge:
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
index e3701e0a1c..e3ea1fb39c 100644
---
a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
@@ -28,6 +28,8 @@
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -36,6 +38,7 @@
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
@@ -58,9 +61,11 @@
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
+import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.Logger;
import org.apache.geode.CancelCriterion;
@@ -1957,6 +1962,8 @@ private void loadFiles(boolean needsOplogs) {
deleteFiles(overflowFileFilter);
}
+ cleanupOrphanedBackupDirectories();
+
persistentOplogs.createOplogs(needsOplogs, persistentBackupFiles);
finished = true;
@@ -1982,6 +1989,28 @@ private void loadFiles(boolean needsOplogs) {
}
}
+ private void cleanupOrphanedBackupDirectories() {
+ for (DirectoryHolder directoryHolder : getDirectoryHolders()) {
+ try {
+ List<Path> backupDirectories =
Files.list(directoryHolder.getDir().toPath())
+ .filter((path) -> path.getFileName().toString()
+ .startsWith(BackupManager.DATA_STORES_TEMPORARY_DIRECTORY))
+ .filter(p -> Files.isDirectory(p)).collect(Collectors.toList());
+ for (Path backupDirectory : backupDirectories) {
+ try {
+ logger.info("Deleting orphaned backup temporary directory: " +
backupDirectory);
+ FileUtils.deleteDirectory(backupDirectory.toFile());
+ } catch (IOException e) {
+ logger.warn("Failed to remove orphaned backup temporary directory:
" + backupDirectory,
+ e);
+ }
+ }
+ } catch (IOException e) {
+ logger.warn(e);
+ }
+ }
+ }
+
/**
* The diskStats are at PR level.Hence if the region is a bucket region, the
stats should not be
* closed, but the figures of entriesInVM and overflowToDisk contributed by
that bucket need to be
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDefinition.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDefinition.java
new file mode 100644
index 0000000000..685d03b474
--- /dev/null
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDefinition.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.geode.cache.DiskStore;
+
+class BackupDefinition {
+ private final Map<DiskStore, Set<Path>> oplogFilesByDiskStore = new
HashMap<>();
+ private final Set<Path> configFiles = new HashSet<>();
+ private final Set<Path> userFiles = new HashSet<>();
+ private final Set<Path> deployedJars = new HashSet<>();
+ private final Map<DiskStore, Path> diskInitFiles = new HashMap<>();
+ private RestoreScript restoreScript;
+
+ void addConfigFileToBackup(Path configFile) {
+ configFiles.add(configFile);
+ }
+
+ void addUserFilesToBackup(Path userFile) {
+ userFiles.add(userFile);
+ }
+
+ void addDeployedJarToBackup(Path deployedJar) {
+ deployedJars.add(deployedJar);
+ }
+
+ void addDiskInitFile(DiskStore diskStore, Path diskInitFile) {
+ diskInitFiles.put(diskStore, diskInitFile);
+ }
+
+ void setRestoreScript(RestoreScript restoreScript) {
+ this.restoreScript = restoreScript;
+ }
+
+ Map<DiskStore, Collection<Path>> getOplogFilesByDiskStore() {
+ return Collections.unmodifiableMap(oplogFilesByDiskStore);
+ }
+
+ Set<Path> getConfigFiles() {
+ return Collections.unmodifiableSet(configFiles);
+ }
+
+ Set<Path> getUserFiles() {
+ return Collections.unmodifiableSet(userFiles);
+ }
+
+ Set<Path> getDeployedJars() {
+ return Collections.unmodifiableSet(deployedJars);
+ }
+
+ Map<DiskStore, Path> getDiskInitFiles() {
+ return Collections.unmodifiableMap(diskInitFiles);
+ }
+
+ RestoreScript getRestoreScript() {
+ return restoreScript;
+ }
+
+ void addOplogFileToBackup(DiskStore diskStore, Path fileLocation) {
+ Set<Path> files = oplogFilesByDiskStore.computeIfAbsent(diskStore, k ->
new HashSet<>());
+ files.add(fileLocation);
+ }
+}
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDestination.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDestination.java
new file mode 100644
index 0000000000..a84deea9a2
--- /dev/null
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupDestination.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.io.IOException;
+
+public interface BackupDestination {
+ String USER_FILES_DIRECTORY = "user";
+ String DEPLOYED_JARS_DIRECTORY = "user";
+ String CONFIG_DIRECTORY = "config";
+ String BACKUP_DIR_PREFIX = "dir";
+ String README_FILE = "README_FILE.txt";
+ String DATA_STORES_DIRECTORY = "diskstores";
+
+ void backupFiles(BackupDefinition backupDefinition) throws IOException;
+}
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
index f773b218f6..23ca4c1f86 100644
---
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupManager.java
@@ -15,10 +15,13 @@
package org.apache.geode.internal.cache.backup;
import java.io.File;
-import java.io.FileOutputStream;
import java.io.IOException;
+import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
@@ -36,7 +39,6 @@
import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.distributed.DistributedSystem;
import org.apache.geode.distributed.internal.DM;
-import org.apache.geode.distributed.internal.DistributionConfig;
import org.apache.geode.distributed.internal.MembershipListener;
import
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
import org.apache.geode.internal.ClassPathLoader;
@@ -48,39 +50,45 @@
import org.apache.geode.internal.cache.GemFireCacheImpl;
import org.apache.geode.internal.cache.InternalCache;
import org.apache.geode.internal.cache.Oplog;
-import org.apache.geode.internal.i18n.LocalizedStrings;
import org.apache.geode.internal.logging.LogService;
/**
* This class manages the state an logic to backup a single cache.
*/
-public class BackupManager implements MembershipListener {
+public class BackupManager {
private static final Logger logger = LogService.getLogger();
static final String INCOMPLETE_BACKUP_FILE = "INCOMPLETE_BACKUP_FILE";
private static final String BACKUP_DIR_PREFIX = "dir";
- private static final String README_FILE = "README_FILE.txt";
private static final String DATA_STORES_DIRECTORY = "diskstores";
+ public static final String DATA_STORES_TEMPORARY_DIRECTORY = "backupTemp_";
private static final String USER_FILES = "user";
private static final String CONFIG_DIRECTORY = "config";
+ private final MembershipListener membershipListener = new
BackupMembershipListener();
private final Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new
HashMap<>();
private final RestoreScript restoreScript = new RestoreScript();
private final InternalDistributedMember sender;
private final InternalCache cache;
private final CountDownLatch allowDestroys = new CountDownLatch(1);
+ private final BackupDefinition backupDefinition = new BackupDefinition();
+ private final String diskStoreDirectoryName;
private volatile boolean isCancelled = false;
+ private Path tempDirectory;
+ private final Map<DiskStore, Map<DirectoryHolder, Path>>
diskStoreDirTempDirsByDiskStore =
+ new HashMap<>();
public BackupManager(InternalDistributedMember sender, InternalCache
gemFireCache) {
this.sender = sender;
this.cache = gemFireCache;
+ diskStoreDirectoryName = DATA_STORES_TEMPORARY_DIRECTORY +
System.currentTimeMillis();
}
public void validateRequestingAdmin() {
// We need to watch for pure admin guys that depart. this
allMembershipListener set
// looks like it should receive those events.
- Set allIds =
getDistributionManager().addAllMembershipListenerAndGetAllIds(this);
+ Set allIds =
getDistributionManager().addAllMembershipListenerAndGetAllIds(membershipListener);
if (!allIds.contains(sender)) {
cleanup();
throw new IllegalStateException("The admin member requesting a backup
has already departed");
@@ -106,52 +114,30 @@ public void validateRequestingAdmin() {
if (abort) {
return new HashSet<>();
}
- HashSet<PersistentID> persistentIds = new HashSet<>();
+ tempDirectory = Files.createTempDirectory("backup_" +
System.currentTimeMillis());
File backupDir = getBackupDir(targetDir);
- // Make sure our baseline is okay for this member
+ // Make sure our baseline is okay for this member, then create inspector
for baseline backup
baselineDir = checkBaseline(baselineDir);
-
- // Create an inspector for the baseline backup
BackupInspector inspector =
(baselineDir == null ? null :
BackupInspector.createInspector(baselineDir));
-
File storesDir = new File(backupDir, DATA_STORES_DIRECTORY);
Collection<DiskStore> diskStores =
cache.listDiskStoresIncludingRegionOwned();
- Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new HashMap<>();
-
- boolean foundPersistentData = false;
- for (DiskStore store : diskStores) {
- DiskStoreImpl diskStore = (DiskStoreImpl) store;
- if (diskStore.hasPersistedData()) {
- if (!foundPersistentData) {
- createBackupDir(backupDir);
- foundPersistentData = true;
- }
- File diskStoreDir = new File(storesDir, getBackupDirName(diskStore));
- diskStoreDir.mkdir();
- DiskStoreBackup backup = startDiskStoreBackup(diskStore,
diskStoreDir, inspector);
- backupByDiskStore.put(diskStore, backup);
- }
- diskStore.releaseBackupLock();
- }
+ Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStores =
+ startDiskStoreBackups(inspector, storesDir, diskStores);
allowDestroys.countDown();
+ HashSet<PersistentID> persistentIds =
finishDiskStoreBackups(backupByDiskStores);
- for (Map.Entry<DiskStoreImpl, DiskStoreBackup> entry :
backupByDiskStore.entrySet()) {
- DiskStoreImpl diskStore = entry.getKey();
- completeBackup(diskStore, entry.getValue());
- diskStore.getStats().endBackup();
- persistentIds.add(diskStore.getPersistentID());
+ if (!backupByDiskStores.isEmpty()) {
+ backupAdditionalFiles(backupDir);
+ backupDefinition.setRestoreScript(restoreScript);
}
- if (!backupByDiskStore.isEmpty()) {
- backupAdditionalFiles(backupDir);
- restoreScript.generate(backupDir);
- File incompleteFile = new File(backupDir, INCOMPLETE_BACKUP_FILE);
- if (!incompleteFile.delete()) {
- throw new IOException("Could not delete file " +
INCOMPLETE_BACKUP_FILE);
- }
+ if (!backupByDiskStores.isEmpty()) {
+ // TODO: allow different stategies...
+ BackupDestination backupDestination = new
FileSystemBackupDestination(backupDir.toPath());
+ backupDestination.backupFiles(backupDefinition);
}
return persistentIds;
@@ -161,10 +147,50 @@ public void validateRequestingAdmin() {
}
}
+ private HashSet<PersistentID> finishDiskStoreBackups(
+ Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStores) throws
IOException {
+ HashSet<PersistentID> persistentIds = new HashSet<>();
+ for (Map.Entry<DiskStoreImpl, DiskStoreBackup> entry :
backupByDiskStores.entrySet()) {
+ DiskStoreImpl diskStore = entry.getKey();
+ completeBackup(diskStore, entry.getValue());
+ diskStore.getStats().endBackup();
+ persistentIds.add(diskStore.getPersistentID());
+ }
+ return persistentIds;
+ }
+
+ private Map<DiskStoreImpl, DiskStoreBackup>
startDiskStoreBackups(BackupInspector inspector,
+ File storesDir, Collection<DiskStore> diskStores) throws IOException {
+ Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new HashMap<>();
+
+ for (DiskStore store : diskStores) {
+ DiskStoreImpl diskStore = (DiskStoreImpl) store;
+ if (diskStore.hasPersistedData()) {
+ File diskStoreDir = new File(storesDir, getBackupDirName(diskStore));
+ DiskStoreBackup backup = startDiskStoreBackup(diskStore, diskStoreDir,
inspector);
+ backupByDiskStore.put(diskStore, backup);
+ }
+ diskStore.releaseBackupLock();
+ }
+ return backupByDiskStore;
+ }
+
public void abort() {
cleanup();
}
+ public boolean isCancelled() {
+ return isCancelled;
+ }
+
+ public void waitForBackup() {
+ try {
+ allowDestroys.await();
+ } catch (InterruptedException e) {
+ throw new InternalGemFireError(e);
+ }
+ }
+
private DM getDistributionManager() {
return cache.getInternalDistributedSystem().getDistributionManager();
}
@@ -172,11 +198,33 @@ private DM getDistributionManager() {
private void cleanup() {
isCancelled = true;
allowDestroys.countDown();
+ cleanupTemporaryFiles();
releaseBackupLocks();
- getDistributionManager().removeAllMembershipListener(this);
+ getDistributionManager().removeAllMembershipListener(membershipListener);
cache.clearBackupManager();
}
+ private void cleanupTemporaryFiles() {
+ if (tempDirectory != null) {
+ try {
+ FileUtils.deleteDirectory(tempDirectory.toFile());
+ } catch (IOException e) {
+ logger.warn("Unable to delete temporary directory created during
backup, " + tempDirectory,
+ e);
+ }
+ }
+ for (Map<DirectoryHolder, Path> diskStoreDirToTempDirMap :
diskStoreDirTempDirsByDiskStore
+ .values()) {
+ for (Path tempDir : diskStoreDirToTempDirMap.values()) {
+ try {
+ FileUtils.deleteDirectory(tempDir.toFile());
+ } catch (IOException e) {
+ logger.warn("Unable to delete temporary directory created during
backup, " + tempDir, e);
+ }
+ }
+ }
+ }
+
private void releaseBackupLocks() {
for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
((DiskStoreImpl) store).releaseBackupLock();
@@ -193,9 +241,7 @@ private void releaseBackupLocks() {
private File findBaselineForThisMember(File baselineParentDir) {
File baselineDir = null;
- /*
- * Find the first matching DiskStoreId directory for this member.
- */
+ // Find the first matching DiskStoreId directory for this member.
for (DiskStore diskStore : cache.listDiskStoresIncludingRegionOwned()) {
File[] matchingFiles = baselineParentDir
.listFiles((file, name) ->
name.endsWith(getBackupDirName((DiskStoreImpl) diskStore)));
@@ -243,10 +289,9 @@ private File checkBaseline(File baselineParentDir) throws
IOException {
}
private void backupAdditionalFiles(File backupDir) throws IOException {
- backupConfigFiles(backupDir);
+ backupConfigFiles();
backupUserFiles(backupDir);
backupDeployedJars(backupDir);
-
}
/**
@@ -266,12 +311,7 @@ private void completeBackup(DiskStoreImpl diskStore,
DiskStoreBackup backup) thr
if (isCancelled()) {
break;
}
- // Copy theoplog to the destination directory
- int index = oplog.getDirectoryHolder().getArrayIndex();
- File backupDir = getBackupDir(backup.getTargetDir(), index);
- // TODO prpersist - We could probably optimize this to *move* the files
- // that we know are supposed to be deleted.
- backupOplog(backupDir, oplog);
+ copyOplog(diskStore, tempDirectory.toFile(), oplog);
// Allow the oplog to be deleted, and process any pending delete
backup.backupFinished(oplog);
@@ -332,15 +372,15 @@ private DiskStoreBackup
startDiskStoreBackup(DiskStoreImpl diskStore, File targe
logger.debug("snapshotting oplogs for disk store {}",
diskStore.getName());
}
- createDiskStoreBackupDirs(diskStore, targetDir);
+ addDiskStoreDirectoriesToRestoreScript(diskStore, targetDir);
restoreScript.addExistenceTest(diskStore.getDiskInitFile().getIFFile());
// Contains all oplogs that will backed up
- Oplog[] allOplogs = null;
// Incremental backup so filter out oplogs that have already been
// backed up
+ Oplog[] allOplogs;
if (null != baselineInspector) {
allOplogs = filterBaselineOplogs(diskStore, baselineInspector);
} else {
@@ -352,9 +392,13 @@ private DiskStoreBackup startDiskStoreBackup(DiskStoreImpl
diskStore, File targe
backup = new DiskStoreBackup(allOplogs, targetDir);
backupByDiskStore.put(diskStore, backup);
- // copy the init file
- File firstDir = getBackupDir(targetDir,
diskStore.getInforFileDirIndex());
- diskStore.getDiskInitFile().copyTo(firstDir);
+
+ // TODO cleanup new location definition code
+ /*
+ * Path diskstoreDir = getBackupDir(tempDir.toFile(),
+ * diskStore.getInforFileDirIndex()).toPath();
Files.createDirectories(diskstoreDir);
+ */
+ backupDiskInitFile(diskStore, tempDirectory);
diskStore.getPersistentOplogSet().forceRoll(null);
if (logger.isDebugEnabled()) {
@@ -373,15 +417,20 @@ private DiskStoreBackup
startDiskStoreBackup(DiskStoreImpl diskStore, File targe
return backup;
}
- private void createDiskStoreBackupDirs(DiskStoreImpl diskStore, File
targetDir)
- throws IOException {
- // Create the directories for this disk store
+ private void backupDiskInitFile(DiskStoreImpl diskStore, Path tempDir)
throws IOException {
+ File diskInitFile = diskStore.getDiskInitFile().getIFFile();
+ String subDir = Integer.toString(diskStore.getInforFileDirIndex());
+ Files.createDirectories(tempDir.resolve(subDir));
+ Files.copy(diskInitFile.toPath(),
tempDir.resolve(subDir).resolve(diskInitFile.getName()),
+ StandardCopyOption.COPY_ATTRIBUTES);
+ backupDefinition.addDiskInitFile(diskStore,
+ tempDir.resolve(subDir).resolve(diskInitFile.getName()));
+ }
+
+ private void addDiskStoreDirectoriesToRestoreScript(DiskStoreImpl diskStore,
File targetDir) {
DirectoryHolder[] directories = diskStore.getDirectoryHolders();
for (int i = 0; i < directories.length; i++) {
File backupDir = getBackupDir(targetDir, i);
- if (!backupDir.mkdirs()) {
- throw new IOException("Could not create directory " + backupDir);
- }
restoreScript.addFile(directories[i].getDir(), backupDir);
}
}
@@ -393,8 +442,7 @@ private void createDiskStoreBackupDirs(DiskStoreImpl
diskStore, File targetDir)
* @param baselineInspector the inspector for the previous backup.
* @return an array of Oplogs to be copied for an incremental backup.
*/
- private Oplog[] filterBaselineOplogs(DiskStoreImpl diskStore,
BackupInspector baselineInspector)
- throws IOException {
+ private Oplog[] filterBaselineOplogs(DiskStoreImpl diskStore,
BackupInspector baselineInspector) {
File baselineDir =
new File(baselineInspector.getBackupDir(),
BackupManager.DATA_STORES_DIRECTORY);
baselineDir = new File(baselineDir, getBackupDirName(diskStore));
@@ -409,9 +457,7 @@ private void createDiskStoreBackupDirs(DiskStoreImpl
diskStore, File targetDir)
// Total list of member oplogs
Oplog[] allOplogs = diskStore.getAllOplogsForBackup();
- /*
- * Loop through operation logs and see if they are already part of the
baseline backup.
- */
+ // Loop through operation logs and see if they are already part of the
baseline backup.
for (Oplog log : allOplogs) {
// See if they are backed up in the current baseline
Map<File, File> oplogMap = log.mapBaseline(baselineOplogFiles);
@@ -422,9 +468,7 @@ private void createDiskStoreBackupDirs(DiskStoreImpl
diskStore, File targetDir)
}
if (oplogMap.isEmpty()) {
- /*
- * These are fresh operation log files so lets back them up.
- */
+ // These are fresh operation log files so lets back them up.
oplogList.add(log);
} else {
/*
@@ -455,42 +499,42 @@ private File getBackupDir(File targetDir, int index) {
return new File(targetDir, BACKUP_DIR_PREFIX + index);
}
- private void backupConfigFiles(File backupDir) throws IOException {
- File configBackupDir = new File(backupDir, CONFIG_DIRECTORY);
- configBackupDir.mkdirs();
- URL url = cache.getCacheXmlURL();
- if (url != null) {
- File cacheXMLBackup =
- new File(configBackupDir,
DistributionConfig.DEFAULT_CACHE_XML_FILE.getName());
- FileUtils.copyFile(new File(cache.getCacheXmlURL().getFile()),
cacheXMLBackup);
- }
+ private void backupConfigFiles() throws IOException {
+ Files.createDirectories(tempDirectory.resolve(CONFIG_DIRECTORY));
+ addConfigFileToBackup(cache.getCacheXmlURL());
+ addConfigFileToBackup(DistributedSystem.getPropertiesFileURL());
+ // TODO: should the gfsecurity.properties file be backed up?
+ }
- URL propertyURL = DistributedSystem.getPropertiesFileURL();
- if (propertyURL != null) {
- File propertyBackup =
- new File(configBackupDir, DistributionConfig.GEMFIRE_PREFIX +
"properties");
- FileUtils.copyFile(new File(DistributedSystem.getPropertiesFile()),
propertyBackup);
+ private void addConfigFileToBackup(URL fileUrl) throws IOException {
+ if (fileUrl != null) {
+ try {
+ Path source = Paths.get(fileUrl.toURI());
+ Path destination =
tempDirectory.resolve(CONFIG_DIRECTORY).resolve(source.getFileName());
+ Files.copy(source, destination, StandardCopyOption.COPY_ATTRIBUTES);
+ backupDefinition.addConfigFileToBackup(destination);
+ } catch (URISyntaxException e) {
+ throw new IOException(e);
+ }
}
-
- // TODO: should the gfsecurity.properties file be backed up?
}
private void backupUserFiles(File backupDir) throws IOException {
+ Files.createDirectories(tempDirectory.resolve(USER_FILES));
List<File> backupFiles = cache.getBackupFiles();
File userBackupDir = new File(backupDir, USER_FILES);
- if (!userBackupDir.exists()) {
- userBackupDir.mkdir();
- }
for (File original : backupFiles) {
if (original.exists()) {
original = original.getAbsoluteFile();
- File dest = new File(userBackupDir, original.getName());
- restoreScript.addUserFile(original, dest);
+ Path destination =
tempDirectory.resolve(USER_FILES).resolve(original.getName());
if (original.isDirectory()) {
- FileUtils.copyDirectory(original, dest);
+ FileUtils.copyDirectory(original, destination.toFile());
} else {
- FileUtils.copyFile(original, dest);
+ Files.copy(original.toPath(), destination,
StandardCopyOption.COPY_ATTRIBUTES);
}
+ backupDefinition.addUserFilesToBackup(destination);
+ File restoreScriptDestination = new File(userBackupDir,
original.getName());
+ restoreScript.addUserFile(original, restoreScriptDestination);
}
}
}
@@ -505,41 +549,34 @@ private void backupDeployedJars(File backupDir) throws
IOException {
JarDeployer deployer = null;
try {
- /*
- * Suspend any user deployed jar file updates during this backup.
- */
+ // Suspend any user deployed jar file updates during this backup.
deployer = ClassPathLoader.getLatest().getJarDeployer();
deployer.suspendAll();
List<DeployedJar> jarList = deployer.findDeployedJars();
if (!jarList.isEmpty()) {
File userBackupDir = new File(backupDir, USER_FILES);
- if (!userBackupDir.exists()) {
- userBackupDir.mkdir();
- }
- for (DeployedJar loader : jarList) {
- File source = new File(loader.getFileCanonicalPath());
- File dest = new File(userBackupDir, source.getName());
- restoreScript.addFile(source, dest);
- if (source.isDirectory()) {
- FileUtils.copyDirectory(source, dest);
- } else {
- FileUtils.copyFile(source, dest);
- }
+ for (DeployedJar jar : jarList) {
+ File source = new File(jar.getFileCanonicalPath());
+ String sourceFileName = source.getName();
+ Path destination =
tempDirectory.resolve(USER_FILES).resolve(sourceFileName);
+ Files.copy(source.toPath(), destination,
StandardCopyOption.COPY_ATTRIBUTES);
+ backupDefinition.addDeployedJarToBackup(destination);
+
+ File restoreScriptDestination = new File(userBackupDir,
sourceFileName);
+ restoreScript.addFile(source, restoreScriptDestination);
}
}
} finally {
- /*
- * Re-enable user deployed jar file updates.
- */
- if (null != deployer) {
+ // Re-enable user deployed jar file updates.
+ if (deployer != null) {
deployer.resumeAll();
}
}
}
- private File getBackupDir(File targetDir) throws IOException {
+ private File getBackupDir(File targetDir) {
InternalDistributedMember memberId =
cache.getInternalDistributedSystem().getDistributedMember();
String vmId = memberId.toString();
@@ -547,47 +584,22 @@ private File getBackupDir(File targetDir) throws
IOException {
return new File(targetDir, vmId);
}
- private void createBackupDir(File backupDir) throws IOException {
- if (backupDir.exists()) {
- throw new IOException("Backup directory " + backupDir.getAbsolutePath()
+ " already exists.");
- }
-
- if (!backupDir.mkdirs()) {
- throw new IOException("Could not create directory: " + backupDir);
- }
-
- File incompleteFile = new File(backupDir, INCOMPLETE_BACKUP_FILE);
- if (!incompleteFile.createNewFile()) {
- throw new IOException("Could not create file: " + incompleteFile);
- }
-
- File readme = new File(backupDir, README_FILE);
- FileOutputStream fos = new FileOutputStream(readme);
-
- try {
- String text = LocalizedStrings.BackupManager_README.toLocalizedString();
- fos.write(text.getBytes());
- } finally {
- fos.close();
- }
- }
-
- private void backupOplog(File targetDir, Oplog oplog) throws IOException {
- File crfFile = oplog.getCrfFile();
- backupFile(targetDir, crfFile);
-
- File drfFile = oplog.getDrfFile();
- backupFile(targetDir, drfFile);
+ private void copyOplog(DiskStore diskStore, File targetDir, Oplog oplog)
throws IOException {
+ DirectoryHolder dirHolder = oplog.getDirectoryHolder();
+ backupFile(diskStore, dirHolder, targetDir, oplog.getCrfFile());
+ backupFile(diskStore, dirHolder, targetDir, oplog.getDrfFile());
oplog.finishKrf();
- File krfFile = oplog.getKrfFile();
- backupFile(targetDir, krfFile);
+ backupFile(diskStore, dirHolder, targetDir, oplog.getKrfFile());
}
- private void backupFile(File targetDir, File file) throws IOException {
+ private void backupFile(DiskStore diskStore, DirectoryHolder dirHolder, File
targetDir, File file)
+ throws IOException {
if (file != null && file.exists()) {
try {
- Files.createLink(targetDir.toPath().resolve(file.getName()),
file.toPath());
+ Path tempDiskDir = getTempDirForDiskStore(diskStore, dirHolder);
+ Files.createLink(tempDiskDir.resolve(file.getName()), file.toPath());
+ backupDefinition.addOplogFileToBackup(diskStore,
tempDiskDir.resolve(file.getName()));
} catch (IOException | UnsupportedOperationException e) {
logger.warn("Unable to create hard link for + {}. Reverting to file
copy", targetDir);
FileUtils.copyFileToDirectory(file, targetDir);
@@ -595,35 +607,55 @@ private void backupFile(File targetDir, File file) throws
IOException {
}
}
+ private Path getTempDirForDiskStore(DiskStore diskStore, DirectoryHolder
dirHolder)
+ throws IOException {
+ Map<DirectoryHolder, Path> tempDirByDirectoryHolder =
+ diskStoreDirTempDirsByDiskStore.get(diskStore);
+ if (tempDirByDirectoryHolder == null) {
+ tempDirByDirectoryHolder = new HashMap<>();
+ diskStoreDirTempDirsByDiskStore.put(diskStore, tempDirByDirectoryHolder);
+ }
+ Path directory = tempDirByDirectoryHolder.get(dirHolder);
+ if (directory != null) {
+ return directory;
+ }
+
+ File diskStoreDir = dirHolder.getDir();
+ directory = diskStoreDir.toPath().resolve(diskStoreDirectoryName);
+ Files.createDirectories(directory);
+ tempDirByDirectoryHolder.put(dirHolder, directory);
+ return directory;
+ }
+
private String cleanSpecialCharacters(String string) {
return string.replaceAll("[^\\w]+", "_");
}
- public void memberDeparted(InternalDistributedMember id, boolean crashed) {
- cleanup();
+ public DiskStoreBackup getBackupForDiskStore(DiskStoreImpl diskStore) {
+ return backupByDiskStore.get(diskStore);
}
- public void memberJoined(InternalDistributedMember id) {}
-
- public void quorumLost(Set<InternalDistributedMember> failures,
- List<InternalDistributedMember> remaining) {}
-
- public void memberSuspect(InternalDistributedMember id,
InternalDistributedMember whoSuspected,
- String reason) {}
+ private class BackupMembershipListener implements MembershipListener {
+ @Override
+ public void memberDeparted(InternalDistributedMember id, boolean crashed) {
+ cleanup();
+ }
- public void waitForBackup() {
- try {
- allowDestroys.await();
- } catch (InterruptedException e) {
- throw new InternalGemFireError(e);
+ @Override
+ public void memberJoined(InternalDistributedMember id) {
+ // unused
}
- }
- public boolean isCancelled() {
- return isCancelled;
- }
+ @Override
+ public void quorumLost(Set<InternalDistributedMember> failures,
+ List<InternalDistributedMember> remaining) {
+ // unused
+ }
- public DiskStoreBackup getBackupForDiskStore(DiskStoreImpl diskStore) {
- return backupByDiskStore.get(diskStore);
+ @Override
+ public void memberSuspect(InternalDistributedMember id,
InternalDistributedMember whoSuspected,
+ String reason) {
+ // unused
+ }
}
}
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestination.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestination.java
new file mode 100644
index 0000000000..997ab10323
--- /dev/null
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestination.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.commons.io.FileUtils;
+
+import org.apache.geode.cache.DiskStore;
+import org.apache.geode.internal.cache.DiskStoreImpl;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.i18n.LocalizedStrings;
+
+public class FileSystemBackupDestination implements BackupDestination {
+ static final String INCOMPLETE_BACKUP_FILE = "INCOMPLETE_BACKUP_FILE";
+
+ private final Path backupDir;
+
+ FileSystemBackupDestination(Path backupDir) {
+ this.backupDir = backupDir;
+ }
+
+ @Override
+ public void backupFiles(BackupDefinition backupDefinition) throws
IOException {
+ Files.createDirectories(backupDir);
+ Files.createFile(backupDir.resolve(INCOMPLETE_BACKUP_FILE));
+ backupAllFilesets(backupDefinition);
+ Files.delete(backupDir.resolve(INCOMPLETE_BACKUP_FILE));
+ }
+
+ private void backupAllFilesets(BackupDefinition backupDefinition) throws
IOException {
+ backupUserFiles(backupDefinition.getUserFiles());
+ backupDeployedJars(backupDefinition.getDeployedJars());
+ backupConfigFiles(backupDefinition.getConfigFiles());
+ backupOplogs(backupDefinition.getOplogFilesByDiskStore());
+ backupDiskInitFiles(backupDefinition.getDiskInitFiles());
+ RestoreScript script = backupDefinition.getRestoreScript();
+ if (script != null) {
+ File scriptFile = script.generate(backupDir.toFile());
+ backupRestoreScript(scriptFile.toPath());
+ }
+ writeReadMe();
+ }
+
+ private void writeReadMe() throws IOException {
+ String text = LocalizedStrings.BackupManager_README.toLocalizedString();
+ Files.write(backupDir.resolve(README_FILE), text.getBytes());
+ }
+
+ private void backupRestoreScript(Path restoreScriptFile) throws IOException {
+ Files.copy(restoreScriptFile,
backupDir.resolve(restoreScriptFile.getFileName()));
+ }
+
+ private void backupDiskInitFiles(Map<DiskStore, Path> diskInitFiles) throws
IOException {
+ for (Map.Entry<DiskStore, Path> entry : diskInitFiles.entrySet()) {
+ Path destinationDirectory = getOplogBackupDir(entry.getKey(),
+ ((DiskStoreImpl) entry.getKey()).getInforFileDirIndex());
+
Files.createDirectories(destinationDirectory.resolve(destinationDirectory));
+ Files.copy(entry.getValue(),
destinationDirectory.resolve(destinationDirectory)
+ .resolve(entry.getValue().getFileName()),
StandardCopyOption.COPY_ATTRIBUTES);
+ }
+ }
+
+ private void backupUserFiles(Collection<Path> userFiles) throws IOException {
+ Path userDirectory = backupDir.resolve(USER_FILES_DIRECTORY);
+ Files.createDirectories(userDirectory);
+ moveFilesOrDirectories(userFiles, userDirectory);
+ }
+
+ private void backupDeployedJars(Collection<Path> jarFiles) throws
IOException {
+ Path jarsDirectory = backupDir.resolve(DEPLOYED_JARS_DIRECTORY);
+ Files.createDirectories(jarsDirectory);
+ moveFilesOrDirectories(jarFiles, jarsDirectory);
+ }
+
+ private void backupConfigFiles(Collection<Path> configFiles) throws
IOException {
+ Path configDirectory = backupDir.resolve(CONFIG_DIRECTORY);
+ Files.createDirectories(configDirectory);
+ moveFilesOrDirectories(configFiles, configDirectory);
+ }
+
+ private void backupOplogs(Map<DiskStore, Collection<Path>> oplogFiles)
throws IOException {
+ for (Map.Entry<DiskStore, Collection<Path>> entry : oplogFiles.entrySet())
{
+ for (Path path : entry.getValue()) {
+ int index = ((DiskStoreImpl) entry.getKey()).getInforFileDirIndex();
+ Path backupDir = createOplogBackupDir(entry.getKey(), index);
+ backupOplog(backupDir, path);
+ }
+ }
+ }
+
+ private Path getOplogBackupDir(DiskStore diskStore, int index) {
+ String name = diskStore.getName();
+ if (name == null) {
+ name = GemFireCacheImpl.getDefaultDiskStoreName();
+ }
+ name = name + "_" + ((DiskStoreImpl)
diskStore).getDiskStoreID().toString();
+ return backupDir.resolve(DATA_STORES_DIRECTORY).resolve(name)
+ .resolve(BACKUP_DIR_PREFIX + index);
+ }
+
+ private Path createOplogBackupDir(DiskStore diskStore, int index) throws
IOException {
+ Path oplogBackupDir = getOplogBackupDir(diskStore, index);
+ Files.createDirectories(oplogBackupDir);
+ return oplogBackupDir;
+ }
+
+ private void backupOplog(Path targetDir, Path path) throws IOException {
+ backupFile(targetDir, path.toFile());
+ }
+
+ private void backupFile(Path targetDir, File file) throws IOException {
+ Files.move(file.toPath(), targetDir.resolve(file.getName()));
+ }
+
+ private void moveFilesOrDirectories(Collection<Path> paths, Path
targetDirectory)
+ throws IOException {
+ for (Path userFile : paths) {
+ Path destination = targetDirectory.resolve(userFile.getFileName());
+ if (Files.isDirectory(userFile)) {
+ FileUtils.moveDirectory(userFile.toFile(), destination.toFile());
+ } else {
+ Files.move(userFile, destination);
+ }
+ }
+ }
+}
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskFactory.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskFactory.java
index 9d919973db..d5dfc9a327 100644
---
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskFactory.java
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskFactory.java
@@ -14,10 +14,8 @@
*/
package org.apache.geode.internal.cache.backup;
-import java.util.HashSet;
import java.util.Set;
-import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.distributed.internal.DM;
import
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
import org.apache.geode.internal.cache.InternalCache;
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskOperation.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskOperation.java
index afeda08b2d..76fa279169 100644
---
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskOperation.java
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FlushToDiskOperation.java
@@ -19,14 +19,11 @@
import org.apache.logging.log4j.Logger;
import org.apache.geode.CancelException;
-import org.apache.geode.cache.DiskStore;
import org.apache.geode.distributed.internal.DM;
import org.apache.geode.distributed.internal.DistributionMessage;
import org.apache.geode.distributed.internal.ReplyException;
import org.apache.geode.distributed.internal.ReplyProcessor21;
import
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.admin.remote.AdminResponse;
-import org.apache.geode.internal.admin.remote.CliLegacyMessage;
import org.apache.geode.internal.cache.InternalCache;
import org.apache.geode.internal.logging.LogService;
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
index cd9d1600d3..a166e93bc2 100644
---
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/RestoreScript.java
@@ -77,7 +77,7 @@ public void addExistenceTest(final File originalFile) {
existenceTests.add(originalFile.getAbsoluteFile());
}
- public void generate(final File outputDir) throws IOException {
+ public File generate(final File outputDir) throws IOException {
File outputFile = new File(outputDir, generator.getScriptName());
try (BufferedWriter writer = Files.newBufferedWriter(outputFile.toPath()))
{
@@ -90,6 +90,7 @@ public void generate(final File outputDir) throws IOException
{
}
outputFile.setExecutable(true, true);
+ return outputFile;
}
private void writePreamble(BufferedWriter writer) throws IOException {
diff --git
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
index ee9a53d7f5..4cc22e25e7 100644
---
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
+++
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
@@ -18,9 +18,6 @@
import java.io.File;
import java.io.IOException;
-import org.apache.geode.internal.cache.backup.RestoreScript;
-import org.apache.geode.internal.cache.backup.ScriptGenerator;
-
class UnixScriptGenerator implements ScriptGenerator {
private static final String SCRIPT_FILE_NAME = "restore.sh";
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/DiskStoreImplIntegrationTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/DiskStoreImplIntegrationTest.java
new file mode 100644
index 0000000000..8e9cf6d5b2
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/DiskStoreImplIntegrationTest.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.File;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DiskStore;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.distributed.ConfigurationProperties;
+import org.apache.geode.internal.cache.backup.BackupManager;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class DiskStoreImplIntegrationTest {
+ private static final String DISK_STORE_NAME = "testDiskStore";
+ private static final String REGION_NAME = "testRegion";
+
+ @Rule
+ public TemporaryFolder temporaryDirectory = new TemporaryFolder();
+
+ private Cache cache;
+
+ @Before
+ public void setup() {
+ cache = createCache();
+ }
+
+ @After
+ public void tearDown() {
+ if (cache != null && !cache.isClosed()) {
+ cache.close();
+ }
+ }
+
+ @Test
+ public void cleansUpOrphanedBackupFilesOnDiskStoreCreation() throws
Exception {
+ File baseDir = temporaryDirectory.newFolder();
+ createRegionWithDiskStore(baseDir);
+ DiskStore diskStore = cache.findDiskStore(DISK_STORE_NAME);
+
+ List<Path> tempDirs = new ArrayList<>();
+ for (File diskDir : diskStore.getDiskDirs()) {
+ Path tempDir =
+
diskDir.toPath().resolve(BackupManager.DATA_STORES_TEMPORARY_DIRECTORY +
"testing");
+ Files.createDirectories(tempDir);
+ tempDirs.add(tempDir);
+ }
+
+ cache.close();
+ cache = createCache();
+ createRegionWithDiskStore(baseDir);
+
+ tempDirs.forEach(tempDir -> assertThat(Files.exists(tempDir)).isFalse());
+ }
+
+ private void createRegionWithDiskStore(File baseDir) {
+ cache.createDiskStoreFactory().setDiskDirs(new File[]
{baseDir}).create(DISK_STORE_NAME);
+ cache.<String,
String>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
+ .setDiskStoreName(DISK_STORE_NAME).create(REGION_NAME);
+ }
+
+ private Cache createCache() {
+ // Setting MCAST port explicitly is currently required due to default
properties set in gradle
+ return new CacheFactory().set(ConfigurationProperties.MCAST_PORT,
"0").create();
+ }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDefinitionTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDefinitionTest.java
new file mode 100644
index 0000000000..5ca9a2b19e
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDefinitionTest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.Mockito.mock;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collections;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.cache.DiskStore;
+import org.apache.geode.test.junit.categories.UnitTest;
+
+@Category(UnitTest.class)
+public class BackupDefinitionTest {
+
+ private BackupDefinition backupDefinition = new BackupDefinition();
+
+ @Test
+ public void hasNoFilesWhenInitialized() {
+ assertThat(backupDefinition.getConfigFiles()).isEmpty();
+ assertThat(backupDefinition.getDeployedJars()).isEmpty();
+ assertThat(backupDefinition.getUserFiles()).isEmpty();
+ assertThat(backupDefinition.getOplogFilesByDiskStore()).isEmpty();
+ assertThat(backupDefinition.getDiskInitFiles()).isEmpty();
+ assertThat(backupDefinition.getRestoreScript()).isNull();
+ }
+
+ @Test
+ public void returnsNonModifiableCollections() {
+ Path cannotBeAdded = Paths.get("");
+ assertThatThrownBy(() ->
backupDefinition.getConfigFiles().add(cannotBeAdded))
+ .isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(() ->
backupDefinition.getDeployedJars().add(cannotBeAdded))
+ .isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(() ->
backupDefinition.getUserFiles().add(cannotBeAdded))
+ .isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(() ->
backupDefinition.getOplogFilesByDiskStore().put(mock(DiskStore.class),
+
Collections.emptySet())).isInstanceOf(UnsupportedOperationException.class);
+ assertThatThrownBy(
+ () -> backupDefinition.getDiskInitFiles().put(mock(DiskStore.class),
cannotBeAdded))
+ .isInstanceOf(UnsupportedOperationException.class);
+ }
+
+ @Test
+ public void containsConfigFilesAdded() {
+ Path config1 = Paths.get("config1");
+ Path config2 = Paths.get("config2");
+ backupDefinition.addConfigFileToBackup(config1);
+ backupDefinition.addConfigFileToBackup(config2);
+ assertThat(backupDefinition.getConfigFiles()).containsOnly(config1,
config2);
+ }
+
+ @Test
+ public void containsDeployedJarFilesAdded() {
+ Path jar1 = Paths.get("jar1");
+ Path jar2 = Paths.get("jar2");
+ backupDefinition.addDeployedJarToBackup(jar1);
+ backupDefinition.addDeployedJarToBackup(jar2);
+ assertThat(backupDefinition.getDeployedJars()).containsOnly(jar1, jar2);
+ }
+
+ @Test
+ public void containsUserFilesAdded() {
+ Path userFile1 = Paths.get("userFile1");
+ Path userFile2 = Paths.get("userFile2");
+ backupDefinition.addUserFilesToBackup(userFile1);
+ backupDefinition.addUserFilesToBackup(userFile2);
+ assertThat(backupDefinition.getUserFiles()).containsOnly(userFile1,
userFile2);
+ }
+
+ @Test
+ public void containsAllAddedOplogFilesAdded() {
+ DiskStore diskStore = mock(DiskStore.class);
+ Path file1 = mock(Path.class);
+ Path file2 = mock(Path.class);
+ backupDefinition.addOplogFileToBackup(diskStore, file1);
+
assertThat(backupDefinition.getOplogFilesByDiskStore()).containsEntry(diskStore,
+ Collections.singleton(file1));
+ backupDefinition.addOplogFileToBackup(diskStore, file2);
+ assertThat(backupDefinition.getOplogFilesByDiskStore().get(diskStore))
+ .containsExactlyInAnyOrder(file1, file2);
+ }
+
+ @Test
+ public void containsAllDiskInitFiles() {
+ DiskStore diskStore1 = mock(DiskStore.class);
+ DiskStore diskStore2 = mock(DiskStore.class);
+ Path diskInit1 = Paths.get("diskInit1");
+ Path diskInit2 = Paths.get("diskInit2");
+ backupDefinition.addDiskInitFile(diskStore1, diskInit1);
+ backupDefinition.addDiskInitFile(diskStore2, diskInit2);
+
assertThat(backupDefinition.getDiskInitFiles()).hasSize(2).containsValues(diskInit1,
diskInit2);
+ }
+
+ @Test
+ public void hasSetRestoreScript() {
+ RestoreScript restoreScript = new RestoreScript();
+ backupDefinition.setRestoreScript(restoreScript);
+ assertThat(backupDefinition.getRestoreScript()).isSameAs(restoreScript);
+ }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
index 77ee71952d..a5ffba5e18 100755
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupDistributedTest.java
@@ -14,26 +14,25 @@
*/
package org.apache.geode.internal.cache.backup;
+import static org.apache.commons.io.FileUtils.listFiles;
+import static org.apache.commons.io.filefilter.DirectoryFileFilter.DIRECTORY;
import static org.apache.geode.test.dunit.Host.getHost;
import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
-import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
import java.io.IOException;
-import java.io.PrintStream;
-import java.io.UncheckedIOException;
+import java.io.InputStreamReader;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.CompletableFuture;
@@ -45,22 +44,27 @@
import junitparams.Parameters;
import junitparams.naming.TestCaseName;
import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.filefilter.RegexFileFilter;
+import org.apache.logging.log4j.Logger;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.DiskStore;
-import org.apache.geode.cache.DiskStoreFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.PartitionedRegionStorageException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.distributed.internal.DistributionManager;
import org.apache.geode.distributed.internal.DistributionMessage;
@@ -68,17 +72,16 @@
import org.apache.geode.distributed.internal.ReplyMessage;
import org.apache.geode.internal.admin.remote.AdminFailureResponse;
import
org.apache.geode.internal.cache.DestroyRegionOperation.DestroyRegionMessage;
-import org.apache.geode.internal.cache.DiskRegion;
import org.apache.geode.internal.cache.GemFireCacheImpl;
import org.apache.geode.internal.cache.PartitionedRegion;
import
org.apache.geode.internal.cache.partitioned.PersistentPartitionedRegionTestBase;
+import org.apache.geode.internal.logging.LogService;
import org.apache.geode.management.BackupStatus;
+import org.apache.geode.management.ManagementException;
import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.DUnitEnv;
-import org.apache.geode.test.dunit.SerializableCallable;
-import org.apache.geode.test.dunit.SerializableRunnable;
import org.apache.geode.test.dunit.VM;
import org.apache.geode.test.junit.categories.DistributedTest;
+import
org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
/**
* Additional tests to consider adding:
@@ -92,11 +95,19 @@
@RunWith(JUnitParamsRunner.class)
@SuppressWarnings("serial")
public class BackupDistributedTest extends PersistentPartitionedRegionTestBase
{
+ private static final Logger logger = LogService.getLogger();
+
+ private static final int NUM_BUCKETS = 15;
+
+ @Rule
+ public SerializableTemporaryFolder tempDir = new
SerializableTemporaryFolder();
private VM vm0;
private VM vm1;
private VM vm2;
private VM vm3;
+ private Map<VM, File> workingDirByVm;
+ private File backupBaseDir;
@Before
public void setUp() throws Exception {
@@ -104,6 +115,15 @@ public void setUp() throws Exception {
vm1 = getHost(0).getVM(1);
vm2 = getHost(0).getVM(2);
vm3 = getHost(0).getVM(3);
+
+ workingDirByVm = new HashMap<>();
+ workingDirByVm.put(vm0, tempDir.newFolder());
+ workingDirByVm.put(vm1, tempDir.newFolder());
+ workingDirByVm.put(vm2, tempDir.newFolder());
+ workingDirByVm.put(vm3, tempDir.newFolder());
+
+ backupBaseDir = tempDir.newFolder("backupDir");
+
}
@Override
@@ -112,30 +132,22 @@ public final void preTearDownCacheTestCase() throws
Exception {
DistributionMessageObserver.setInstance(null);
disconnectFromDS();
});
-
- StringBuilder failures = new StringBuilder();
- delete(getBackupDir(), failures);
- if (failures.length() > 0) {
- // logger.error(failures.toString());
- }
}
@Test
public void testBackupPR() throws Exception {
- createPersistentRegion(vm0);
- createPersistentRegion(vm1);
+ createPersistentRegions();
- long lm0 = setBackupFiles(vm0);
- long lm1 = setBackupFiles(vm1);
+ long lastModified0 = setBackupFiles(vm0);
+ long lastModified1 = setBackupFiles(vm1);
- createData(vm0, 0, 5, "A", "region1");
- createData(vm0, 0, 5, "B", "region2");
+ createData();
- BackupStatus status = backup(vm2);
+ BackupStatus status = backupMember(vm2);
assertThat(status.getBackedUpDiskStores()).hasSize(2);
assertThat(status.getOfflineDiskStores()).isEmpty();
- Collection<File> files = FileUtils.listFiles(getBackupDir(), new String[]
{"txt"}, true);
+ Collection<File> files = FileUtils.listFiles(backupBaseDir, new String[]
{"txt"}, true);
assertThat(files).hasSize(4);
deleteOldUserUserFile(vm0);
@@ -156,12 +168,17 @@ public void testBackupPR() throws Exception {
restoreBackup(2);
- createPersistentRegionsAsync();
+ createPersistentRegions();
checkData(vm0, 0, 5, "A", "region1");
checkData(vm0, 0, 5, "B", "region2");
- verifyUserFileRestored(vm0, lm0);
- verifyUserFileRestored(vm1, lm1);
+ verifyUserFileRestored(vm0, lastModified0);
+ verifyUserFileRestored(vm1, lastModified1);
+ }
+
+ private void createData() {
+ createData(vm0, 0, 5, "A", "region1");
+ createData(vm0, 0, 5, "B", "region2");
}
/**
@@ -173,13 +190,11 @@ public void testBackupPR() throws Exception {
*/
@Test
public void testBackupFromMemberWithDiskStore() throws Exception {
- createPersistentRegion(vm0);
- createPersistentRegion(vm1);
+ createPersistentRegions();
- createData(vm0, 0, 5, "A", "region1");
- createData(vm0, 0, 5, "B", "region2");
+ createData();
- BackupStatus status = backup(vm1);
+ BackupStatus status = backupMember(vm1);
assertThat(status.getBackedUpDiskStores()).hasSize(2);
for (DistributedMember key : status.getBackedUpDiskStores().keySet()) {
@@ -193,11 +208,11 @@ public void testBackupFromMemberWithDiskStore() throws
Exception {
closeCache(vm1);
// destroy the current data
- invokeInEveryVM("cleanDiskDirs", () -> cleanDiskDirs());
+ cleanDiskDirsInEveryVM();
restoreBackup(2);
- createPersistentRegionsAsync();
+ createPersistentRegions();
checkData(vm0, 0, 5, "A", "region1");
checkData(vm0, 0, 5, "B", "region2");
@@ -212,16 +227,16 @@ public void testBackupFromMemberWithDiskStore() throws
Exception {
*/
@Test
public void testBackupWhileBucketIsCreated() throws Exception {
- createPersistentRegion(vm0);
+ createPersistentRegion(vm0).await();
// create a bucket on vm0
createData(vm0, 0, 1, "A", "region1");
// create the pr on vm1, which won't have any buckets
- createPersistentRegion(vm1);
+ createPersistentRegion(vm1).await();
CompletableFuture<BackupStatus> backupStatusFuture =
- CompletableFuture.supplyAsync(() -> backup(vm2));
+ CompletableFuture.supplyAsync(() -> backupMember(vm2));
CompletableFuture<Void> createDataFuture =
CompletableFuture.runAsync(() -> createData(vm0, 1, 5, "A",
"region1"));
CompletableFuture.allOf(backupStatusFuture, createDataFuture);
@@ -241,11 +256,11 @@ public void testBackupWhileBucketIsCreated() throws
Exception {
closeCache(vm1);
// destroy the current data
- invokeInEveryVM("cleanDiskDirs", () -> cleanDiskDirs());
+ cleanDiskDirsInEveryVM();
restoreBackup(2);
- createPersistentRegionsAsync();
+ createPersistentRegions();
checkData(vm0, 0, 1, "A", "region1");
}
@@ -271,13 +286,13 @@ public void testWhileBucketIsMovedBackup(final
WhenToInvokeBackup whenToInvokeBa
DistributionMessageObserver.setInstance(createTestHookToBackup(whenToInvokeBackup));
});
- createPersistentRegion(vm0);
+ createPersistentRegion(vm0).await();
// create two buckets on vm0
createData(vm0, 0, 2, "A", "region1");
// create the pr on vm1, which won't have any buckets
- createPersistentRegion(vm1);
+ createPersistentRegion(vm1).await();
// Perform a rebalance. This will trigger the backup in the middle of the
bucket move.
vm0.invoke("Do rebalance", () -> {
@@ -299,13 +314,11 @@ public void testWhileBucketIsMovedBackup(final
WhenToInvokeBackup whenToInvokeBa
closeCache(vm1);
// Destroy the current data
- invokeInEveryVM("Clean disk dirs", () -> {
- cleanDiskDirs();
- });
+ cleanDiskDirsInEveryVM();
restoreBackup(2);
- createPersistentRegionsAsync();
+ createPersistentRegions();
checkData(vm0, 0, 2, "A", "region1");
}
@@ -324,16 +337,14 @@ public void
testBackupStatusCleanedUpAfterFailureOnOneMember() throws Exception
createTestHookToThrowIOExceptionBeforeProcessingPrepareBackupRequest(exceptionMessage));
});
- createPersistentRegion(vm0);
- createPersistentRegion(vm1);
+ createPersistentRegions();
- createData(vm0, 0, 5, "A", "region1");
- createData(vm0, 0, 5, "B", "region2");
+ createData();
- assertThatThrownBy(() ->
backup(vm2)).hasRootCauseInstanceOf(IOException.class);
+ assertThatThrownBy(() ->
backupMember(vm2)).hasRootCauseInstanceOf(IOException.class);
// second backup should succeed because the observer and backup state has
been cleared
- BackupStatus status = backup(vm2);
+ BackupStatus status = backupMember(vm2);
assertThat(status.getBackedUpDiskStores()).hasSize(2);
assertThat(status.getOfflineDiskStores()).isEmpty();
}
@@ -343,13 +354,12 @@ public void
testBackupStatusCleanedUpAfterFailureOnOneMember() throws Exception
*/
@Test
public void testBackupOverflow() throws Exception {
- createPersistentRegion(vm0);
+ createPersistentRegion(vm0).await();
createOverflowRegion(vm1);
- createData(vm0, 0, 5, "A", "region1");
- createData(vm0, 0, 5, "B", "region2");
+ createData();
- BackupStatus status = backup(vm2);
+ BackupStatus status = backupMember(vm2);
assertThat(status.getBackedUpDiskStores()).hasSize(1);
assertThat(status.getBackedUpDiskStores().values().iterator().next()).hasSize(2);
assertThat(status.getOfflineDiskStores()).isEmpty();
@@ -359,16 +369,15 @@ public void testBackupOverflow() throws Exception {
@Test
public void testBackupPRWithOfflineMembers() throws Exception {
- createPersistentRegion(vm0);
- createPersistentRegion(vm1);
- createPersistentRegion(vm2);
+ createPersistentRegion(vm0).await();
+ createPersistentRegion(vm1).await();
+ createPersistentRegion(vm2).await();
- createData(vm0, 0, 5, "A", "region1");
- createData(vm0, 0, 5, "B", "region2");
+ createData();
closeCache(vm2);
- BackupStatus status = backup(vm3);
+ BackupStatus status = backupMember(vm3);
assertThat(status.getBackedUpDiskStores()).hasSize(2);
assertThat(status.getOfflineDiskStores()).hasSize(2);
}
@@ -377,14 +386,69 @@ private DistributionMessageObserver
createTestHookToBackup(
WhenToInvokeBackup backupInvocationTestHook) {
switch (backupInvocationTestHook) {
case BEFORE_SENDING_DESTROYREGIONMESSAGE:
- return createTestHookToBackupBeforeSendingDestroyRegionMessage(() ->
backup(vm2));
+ return createTestHookToBackupBeforeSendingDestroyRegionMessage(() ->
backupMember(vm2));
case BEFORE_PROCESSING_REPLYMESSAGE:
- return createTestHookToBackupBeforeProcessingReplyMessage(() ->
backup(vm2));
+ return createTestHookToBackupBeforeProcessingReplyMessage(() ->
backupMember(vm2));
default:
throw new AssertionError("Invalid backupInvocationTestHook " +
backupInvocationTestHook);
}
}
+ /**
+ * Test what happens when we restart persistent members while there is an
accessor concurrently
+ * performing puts.
+ */
+ @Test
+ public void testRecoverySystemWithConcurrentPutter() throws Throwable {
+ createColatedPersistentRegions(vm1).await();
+ createColatedPersistentRegions(vm2).await();
+
+ createAccessor(vm0);
+
+ createData(vm0, 0, NUM_BUCKETS, "a", "region1");
+ createData(vm0, 0, NUM_BUCKETS, "a", "region2");
+
+
+ // backup the system. We use this to get a snapshot of vm1 and vm2
+ // when they both are online. Recovering from this backup simulates
+ // a simultaneous kill and recovery.
+ backupMember(vm3);
+
+ closeCache(vm1);
+ closeCache(vm2);
+
+ cleanDiskDirsInEveryVM();
+ restoreBackup(2);
+
+ // in vm0, start doing a bunch of concurrent puts.
+ AsyncInvocation async0 = vm0.invokeAsync(() -> {
+ Cache cache = getCache();
+ Region region = cache.getRegion("region1");
+ try {
+ for (int i = 0;; i++) {
+ try {
+ region.get(i % NUM_BUCKETS);
+ } catch (PartitionOfflineException |
PartitionedRegionStorageException expected) {
+ // do nothing.
+ }
+ }
+ } catch (CacheClosedException expected) {
+ // ok, we're done.
+ }
+ });
+
+ AsyncInvocation async1 = createColatedPersistentRegions(vm1);
+ AsyncInvocation async2 = createColatedPersistentRegions(vm2);
+ async1.await();
+ async2.await();
+
+ // close the cache in vm0 to stop the async puts.
+ closeCache(vm0);
+
+ // make sure we didn't get an exception
+ async0.await();
+ }
+
private DistributionMessageObserver
createTestHookToBackupBeforeProcessingReplyMessage(
Runnable task) {
return new DistributionMessageObserver() {
@@ -428,8 +492,12 @@ public void beforeSendMessage(DistributionManager dm,
DistributionMessage messag
}
private void cleanDiskDirsInEveryVM() {
- invokeInEveryVM("cleanDiskDirsInEveryVM", () -> {
- cleanDiskDirs();
+ workingDirByVm.forEach((vm, file) -> {
+ try {
+ FileUtils.deleteDirectory(file);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
});
}
@@ -451,151 +519,113 @@ public void beforeProcessMessage(DistributionManager
dm, DistributionMessage mes
};
}
- private void createPersistentRegionsAsync() throws ExecutionException,
InterruptedException {
- AsyncInvocation async0 = createPersistentRegionAsync(vm0);
- AsyncInvocation async1 = createPersistentRegionAsync(vm1);
- async0.await();
- async1.await();
+ private void createPersistentRegions() throws ExecutionException,
InterruptedException {
+ AsyncInvocation create1 = createPersistentRegion(vm0);
+ AsyncInvocation create2 = createPersistentRegion(vm1);
+ create1.await();
+ create2.await();
}
private void validateBackupComplete() {
- File backupDir = getBackupDir();
Pattern pattern = Pattern.compile(".*INCOMPLETE.*");
- File[] files = backupDir.listFiles((dir1, name) ->
pattern.matcher(name).matches());
+ File[] files = backupBaseDir.listFiles((dir1, name) ->
pattern.matcher(name).matches());
assertNotNull(files);
assertTrue(files.length == 0);
}
- private void createPersistentRegion(VM vm) throws Exception {
- createPersistentRegionAsync(vm).await();
- }
-
private void deleteOldUserUserFile(final VM vm) {
- SerializableRunnable validateUserFileBackup = new
SerializableRunnable("set user backups") {
- @Override
- public void run() {
- try {
- FileUtils.deleteDirectory(new File("userbackup_" + vm.getId()));
- } catch (IOException e) {
- fail(e.getMessage());
- }
- }
- };
- vm.invoke(validateUserFileBackup);
+ vm.invoke(() -> {
+ File userDir = new File(workingDirByVm.get(vm), "userbackup-");
+ FileUtils.deleteDirectory(userDir);
+ });
}
private long setBackupFiles(final VM vm) {
- SerializableCallable setUserBackups = new SerializableCallable("set user
backups") {
- @Override
- public Object call() {
- final int pid = DUnitEnv.get().getPid();
- File vmdir = new File("userbackup_" + pid);
- File test1 = new File(vmdir, "test1");
- File test2 = new File(test1, "test2");
- File mytext = new File(test2, "my.txt");
- final ArrayList<File> backuplist = new ArrayList<>();
- test2.mkdirs();
- PrintStream ps = null;
- try {
- ps = new PrintStream(mytext);
- } catch (FileNotFoundException e) {
- fail(e.getMessage());
- }
- ps.println(pid);
- ps.close();
- mytext.setExecutable(true, true);
- long lastModified = mytext.lastModified();
- backuplist.add(test2);
-
- Cache cache = getCache();
- GemFireCacheImpl gfci = (GemFireCacheImpl) cache;
- gfci.setBackupFiles(backuplist);
-
- return lastModified;
- }
- };
- return (long) vm.invoke(setUserBackups);
+ return vm.invoke(() -> {
+ File workingDir = workingDirByVm.get(vm);
+ File test1 = new File(workingDir, "test1");
+ File test2 = new File(test1, "test2");
+ File mytext = new File(test2, "my.txt");
+ final ArrayList<File> backuplist = new ArrayList<>();
+ test2.mkdirs();
+ Files.createFile(mytext.toPath());
+ long lastModified = mytext.lastModified();
+ backuplist.add(test2);
+
+ GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
+ cache.setBackupFiles(backuplist);
+
+ return lastModified;
+ });
}
private void verifyUserFileRestored(VM vm, final long lm) {
- vm.invoke(new SerializableRunnable() {
- @Override
- public void run() {
- final int pid = DUnitEnv.get().getPid();
- File vmdir = new File("userbackup_" + pid);
- File mytext = new File(vmdir, "test1/test2/my.txt");
- assertTrue(mytext.exists());
- if (System.getProperty("java.specification.version").equals("1.6")) {
- assertTrue(mytext.canExecute());
- } else {
- System.out.println(
- "java.specification.version is " +
System.getProperty("java.specification.version")
- + ", canExecute is" + mytext.canExecute());
- }
- assertEquals(lm, mytext.lastModified());
-
- try {
- FileReader fr = new FileReader(mytext);
- BufferedReader bin = new BufferedReader(fr);
- String content = bin.readLine();
- assertTrue(content.equals("" + pid));
- } catch (IOException e) {
- fail(e.getMessage());
- }
- }
+ vm.invoke(() -> {
+ File workingDir = workingDirByVm.get(vm);
+ File test1 = new File(workingDir, "test1");
+ File test2 = new File(test1, "test2");
+ File mytext = new File(test2, "my.txt");
+ assertTrue(mytext.exists());
+ assertEquals(lm, mytext.lastModified());
});
}
- private AsyncInvocation createPersistentRegionAsync(final VM vm) {
- SerializableRunnable createRegion = new SerializableRunnable("Create
persistent region") {
- @Override
- public void run() {
- Cache cache = getCache();
- DiskStoreFactory dsf = cache.createDiskStoreFactory();
- dsf.setDiskDirs(getDiskDirs(getUniqueName()));
- dsf.setMaxOplogSize(1);
- DiskStore ds = dsf.create(getUniqueName());
-
- RegionFactory rf = new RegionFactory();
- rf.setDiskStoreName(ds.getName());
- rf.setDiskSynchronous(true);
- rf.setDataPolicy(getDataPolicy());
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setRedundantCopies(0);
- rf.setPartitionAttributes(paf.create());
- rf.create("region1");
-
- dsf = cache.createDiskStoreFactory();
- dsf.setDiskDirs(getDiskDirs(getUniqueName() + 2));
- dsf.setMaxOplogSize(1);
- dsf.create(getUniqueName() + 2);
- rf.setDiskStoreName(getUniqueName() + 2);
- rf.create("region2");
- }
- };
- return vm.invokeAsync(createRegion);
+ private AsyncInvocation createPersistentRegion(final VM vm) {
+ return vm.invokeAsync(() -> {
+ Cache cache = getCache();
+ DiskStore diskStore1 = cache.createDiskStoreFactory()
+ .setDiskDirs(getDiskDirs(vm, "vm" + vm.getId() +
"diskstores_1")).setMaxOplogSize(1)
+ .create(getUniqueName());
+
+ DiskStore diskStore2 = cache.createDiskStoreFactory()
+ .setDiskDirs(getDiskDirs(vm, "vm" + vm.getId() +
"diskstores_2")).setMaxOplogSize(1)
+ .create(getUniqueName() + 2);
+
+ RegionFactory regionFactory =
cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
+ .setPartitionAttributes(new
PartitionAttributesFactory().setRedundantCopies(0).create());
+
+
regionFactory.setDiskStoreName(diskStore1.getName()).setDiskSynchronous(true)
+ .create("region1");
+
regionFactory.setDiskStoreName(diskStore2.getName()).setDiskSynchronous(true)
+ .create("region2");
+ });
+ }
+
+ private AsyncInvocation createColatedPersistentRegions(final VM vm) {
+ return vm.invokeAsync(() -> {
+ Cache cache = getCache();
+ DiskStore diskStore1 = cache.createDiskStoreFactory()
+ .setDiskDirs(getDiskDirs(vm, "vm" + vm.getId() +
"diskstores_1")).setMaxOplogSize(1)
+ .create(getUniqueName());
+
+ DiskStore diskStore2 = cache.createDiskStoreFactory()
+ .setDiskDirs(getDiskDirs(vm, "vm" + vm.getId() +
"diskstores_2")).setMaxOplogSize(1)
+ .create(getUniqueName() + 2);
+
+ RegionFactory regionFactory =
cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
+ .setPartitionAttributes(new
PartitionAttributesFactory().setRedundantCopies(0).create());
+
+
regionFactory.setDiskStoreName(diskStore1.getName()).setDiskSynchronous(true)
+ .create("region1");
+
regionFactory.setDiskStoreName(diskStore2.getName()).setDiskSynchronous(true)
+ .setPartitionAttributes(new
PartitionAttributesFactory().setRedundantCopies(0)
+ .setColocatedWith("region1").create())
+ .create("region2");
+ });
}
private void createOverflowRegion(final VM vm) {
- SerializableRunnable createRegion = new SerializableRunnable("Create
persistent region") {
- @Override
- public void run() {
- Cache cache = getCache();
- DiskStoreFactory dsf = cache.createDiskStoreFactory();
- dsf.setDiskDirs(getDiskDirs(getUniqueName()));
- dsf.setMaxOplogSize(1);
- DiskStore ds = dsf.create(getUniqueName());
-
- RegionFactory rf = new RegionFactory();
- rf.setDiskStoreName(ds.getName());
- rf.setDiskSynchronous(true);
- rf.setDataPolicy(DataPolicy.REPLICATE);
- rf.setEvictionAttributes(
- EvictionAttributes.createLIFOEntryAttributes(1,
EvictionAction.OVERFLOW_TO_DISK));
- rf.create("region3");
- }
- };
- vm.invoke(createRegion);
+ vm.invoke(() -> {
+ Cache cache = getCache();
+ DiskStore diskStore = cache.createDiskStoreFactory()
+ .setDiskDirs(getDiskDirs(vm,
getUniqueName())).create(getUniqueName());
+
+
cache.createRegionFactory(RegionShortcut.REPLICATE).setDiskStoreName(diskStore.getName())
+ .setDiskSynchronous(true)
+ .setEvictionAttributes(
+ EvictionAttributes.createLIFOEntryAttributes(1,
EvictionAction.OVERFLOW_TO_DISK))
+ .create("region3");
+ });
}
@Override
@@ -606,19 +636,14 @@ protected void createData(VM vm, final int startKey,
final int endKey, final Str
@Override
protected void createData(VM vm, final int startKey, final int endKey, final
String value,
final String regionName) {
- SerializableRunnable createData = new SerializableRunnable() {
-
- @Override
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion(regionName);
+ vm.invoke(() -> {
+ Cache cache = getCache();
+ Region region = cache.getRegion(regionName);
- for (int i = startKey; i < endKey; i++) {
- region.put(i, value);
- }
+ for (int i = startKey; i < endKey; i++) {
+ region.put(i, value);
}
- };
- vm.invoke(createData);
+ });
}
@Override
@@ -629,32 +654,18 @@ protected void checkData(VM vm, final int startKey, final
int endKey, final Stri
@Override
protected void checkData(VM vm, final int startKey, final int endKey, final
String value,
final String regionName) {
- SerializableRunnable checkData = new SerializableRunnable() {
+ vm.invoke(() -> {
+ Region region = getCache().getRegion(regionName);
- @Override
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion(regionName);
-
- for (int i = startKey; i < endKey; i++) {
- assertEquals(value, region.get(i));
- }
+ for (int i = startKey; i < endKey; i++) {
+ assertEquals(value, region.get(i));
}
- };
-
- vm.invoke(checkData);
+ });
}
@Override
protected void closeCache(final VM vm) {
- SerializableRunnable closeCache = new SerializableRunnable("close cache") {
- @Override
- public void run() {
- Cache cache = getCache();
- cache.close();
- }
- };
- vm.invoke(closeCache);
+ vm.invoke(() -> getCache().close());
}
@Override
@@ -664,76 +675,70 @@ public void run() {
@Override
protected Set<Integer> getBucketList(VM vm, final String regionName) {
- SerializableCallable getBuckets = new SerializableCallable("get buckets") {
-
- @Override
- public Object call() throws Exception {
- Cache cache = getCache();
- PartitionedRegion region = (PartitionedRegion)
cache.getRegion(regionName);
- return new TreeSet<>(region.getDataStore().getAllLocalBucketIds());
- }
- };
-
- return (Set<Integer>) vm.invoke(getBuckets);
+ return vm.invoke(() -> {
+ Cache cache = getCache();
+ PartitionedRegion region = (PartitionedRegion)
cache.getRegion(regionName);
+ return new TreeSet<>(region.getDataStore().getAllLocalBucketIds());
+ });
}
- private File[] getDiskDirs(String dsName) {
- File[] dirs = getDiskDirs();
+ private File[] getDiskDirs(VM vm, String dsName) {
File[] diskStoreDirs = new File[1];
- diskStoreDirs[0] = new File(dirs[0], dsName);
+ diskStoreDirs[0] = new File(workingDirByVm.get(vm), dsName);
diskStoreDirs[0].mkdirs();
return diskStoreDirs;
}
- private DataPolicy getDataPolicy() {
- return DataPolicy.PERSISTENT_PARTITION;
- }
-
- void checkRecoveredFromDisk(VM vm, final int bucketId, final boolean
recoveredLocally) {
- vm.invoke(new SerializableRunnable("check recovered from disk") {
- @Override
- public void run() {
- Cache cache = getCache();
- PartitionedRegion region = (PartitionedRegion)
cache.getRegion(getPartitionedRegionName());
- DiskRegion disk =
region.getRegionAdvisor().getBucket(bucketId).getDiskRegion();
- if (recoveredLocally) {
- assertEquals(0, disk.getStats().getRemoteInitializations());
- assertEquals(1, disk.getStats().getLocalInitializations());
- } else {
- assertEquals(1, disk.getStats().getRemoteInitializations());
- assertEquals(0, disk.getStats().getLocalInitializations());
- }
+ private BackupStatus backupMember(final VM vm) {
+ return vm.invoke("backup", () -> {
+ try {
+ return
BackupUtil.backupAllMembers(getSystem().getDistributionManager(), backupBaseDir,
+ null);
+ } catch (ManagementException e) {
+ throw new RuntimeException(e);
}
});
}
- /**
- * Recursively delete a file or directory. A description of any files or
directories that can not
- * be deleted will be added to failures if failures is non-null. This method
tries to delete as
- * much as possible.
- */
- public static void delete(File file, StringBuilder failures) {
- if (!file.exists()) {
- return;
+ protected void restoreBackup(final int expectedNumScripts)
+ throws IOException, InterruptedException {
+ Collection<File> restoreScripts =
+ listFiles(backupBaseDir, new RegexFileFilter(".*restore.*"),
DIRECTORY);
+ assertThat(restoreScripts).hasSize(expectedNumScripts);
+ for (File script : restoreScripts) {
+ execute(script);
}
+ }
- if (file.isDirectory()) {
- File[] fileList = file.listFiles();
- if (fileList != null) {
- for (File child : fileList) {
- delete(child, failures);
- }
- }
- }
+ private void execute(final File script) throws IOException,
InterruptedException {
+ ProcessBuilder processBuilder = new
ProcessBuilder(script.getAbsolutePath());
+ processBuilder.redirectErrorStream(true);
+ Process process = processBuilder.start();
- try {
- Files.delete(file.toPath());
- } catch (IOException e) {
- if (failures != null) {
- failures.append("Could not delete ").append(file).append(" due to
").append(e.getMessage())
- .append('\n');
+ try (BufferedReader reader =
+ new BufferedReader(new InputStreamReader(process.getInputStream()))) {
+ String line;
+ while ((line = reader.readLine()) != null) {
+ logger.info("OUTPUT:" + line);
}
}
+
+ assertThat(process.waitFor()).isEqualTo(0);
+ }
+
+ private void createAccessor(VM vm) {
+ vm.invoke(() -> {
+ Cache cache = getCache();
+
+ cache.createRegionFactory(RegionShortcut.PARTITION)
+ .setPartitionAttributes(
+ new
PartitionAttributesFactory().setRedundantCopies(0).setLocalMaxMemory(0).create())
+ .create("region1");
+ cache.createRegionFactory(RegionShortcut.PARTITION)
+ .setPartitionAttributes(new
PartitionAttributesFactory().setColocatedWith("region1")
+ .setRedundantCopies(0).setLocalMaxMemory(0).create())
+ .create("region2");
+ });
}
enum WhenToInvokeBackup {
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
index ee587380eb..78ab872bfd 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
@@ -319,7 +319,7 @@ public void testBackupCacheXml() throws Exception {
backup.prepareForBackup();
backup.doBackup(backupDir, null, false);
Collection<File> fileCollection = FileUtils.listFiles(backupDir,
- new RegexFileFilter("cache.xml"), DirectoryFileFilter.DIRECTORY);
+ new RegexFileFilter("BackupIntegrationTest.cache.xml"),
DirectoryFileFilter.DIRECTORY);
assertEquals(1, fileCollection.size());
File cacheXmlBackup = fileCollection.iterator().next();
assertTrue(cacheXmlBackup.exists());
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
index f48f7f355a..1b30f749e7 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupPrepareAndFinishMsgDUnitTest.java
@@ -236,5 +236,4 @@ public void beforeWaitForBackupCompletion() {
}
return regionFactory.create(TEST_REGION_NAME);
}
-
}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupReplyProcessorTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupReplyProcessorTest.java
index e01485285c..a32f78a261 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupReplyProcessorTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/BackupReplyProcessorTest.java
@@ -14,8 +14,12 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.assertj.core.api.Assertions.*;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
import java.util.HashSet;
import java.util.Set;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestinationTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestinationTest.java
new file mode 100644
index 0000000000..752d88cb41
--- /dev/null
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FileSystemBackupDestinationTest.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express
+ * or implied. See the License for the specific language governing permissions
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.backup;
+
+import static
org.apache.geode.internal.cache.backup.BackupDestination.CONFIG_DIRECTORY;
+import static
org.apache.geode.internal.cache.backup.BackupDestination.DATA_STORES_DIRECTORY;
+import static
org.apache.geode.internal.cache.backup.BackupDestination.DEPLOYED_JARS_DIRECTORY;
+import static
org.apache.geode.internal.cache.backup.BackupDestination.README_FILE;
+import static
org.apache.geode.internal.cache.backup.BackupDestination.USER_FILES_DIRECTORY;
+import static
org.apache.geode.internal.cache.backup.FileSystemBackupDestination.INCOMPLETE_BACKUP_FILE;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.geode.internal.cache.DiskStoreImpl;
+import org.apache.geode.internal.cache.GemFireCacheImpl;
+import org.apache.geode.internal.cache.Oplog;
+import org.apache.geode.internal.cache.persistence.DiskStoreID;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class FileSystemBackupDestinationTest {
+
+ @Rule
+ public TemporaryFolder tempDir = new TemporaryFolder();
+
+ private BackupDefinition backupDefinition;
+ private Path targetDir;
+ private RestoreScript restoreScript;
+
+ @Before
+ public void setup() throws IOException {
+ backupDefinition = new BackupDefinition();
+ Path backupDirectory = tempDir.newFolder("backups").toPath();
+ targetDir = backupDirectory.resolve("backupTarget");
+ restoreScript = mock(RestoreScript.class);
+ when(restoreScript.generate(any())).thenReturn(tempDir.newFile());
+ }
+
+ @Test
+ public void userFilesAreBackedUp() throws Exception {
+ Path userFile = tempDir.newFile("userFile").toPath();
+ Path userSubdir = tempDir.newFolder("userSubDir").toPath();
+ Path userFileInDir = Files.write(userSubdir.resolve("fileInDir"), new
byte[] {});
+ backupDefinition.addUserFilesToBackup(userFile);
+ backupDefinition.addUserFilesToBackup(userSubdir);
+
+ executeBackup();
+
+ Path userDir = targetDir.resolve(USER_FILES_DIRECTORY);
+ assertThat(userDir.resolve(userFile.getFileName())).exists();
+ assertThat(userDir.resolve(userSubdir.getFileName())).exists();
+
assertThat(userDir.resolve(userSubdir.getFileName()).resolve(userFileInDir.getFileName()))
+ .exists();
+ }
+
+ @Test
+ public void deployedJarsAreBackedUp() throws Exception {
+ Path jarFile = tempDir.newFile("jarFile").toPath();
+ Path jarSubdir = tempDir.newFolder("jarSubdir").toPath();
+ Path jarInSubdir = Files.write(jarSubdir.resolve("jarInSubdir"), new
byte[] {});
+ backupDefinition.addDeployedJarToBackup(jarFile);
+ backupDefinition.addDeployedJarToBackup(jarSubdir);
+
+ executeBackup();
+
+ Path userDir = targetDir.resolve(DEPLOYED_JARS_DIRECTORY);
+ assertThat(userDir.resolve(jarFile.getFileName())).exists();
+ assertThat(userDir.resolve(jarSubdir.getFileName())).exists();
+
assertThat(userDir.resolve(jarSubdir.getFileName()).resolve(jarInSubdir.getFileName()))
+ .exists();
+ }
+
+ @Test
+ public void configFilesAreBackedUp() throws Exception {
+ Path cacheXml = tempDir.newFile("cache.xml").toPath();
+ Path propertyFile = tempDir.newFile("properties").toPath();
+ backupDefinition.addConfigFileToBackup(cacheXml);
+ backupDefinition.addConfigFileToBackup(propertyFile);
+
+ executeBackup();
+
+ Path configDir = targetDir.resolve(CONFIG_DIRECTORY);
+ assertThat(configDir.resolve(cacheXml.getFileName())).exists();
+ assertThat(configDir.resolve(propertyFile.getFileName())).exists();
+ }
+
+ @Test
+ public void oplogFilesAreBackedUp() throws Exception {
+ DiskStoreImpl diskStore = mock(DiskStoreImpl.class);
+ when(diskStore.getDiskStoreID()).thenReturn(new DiskStoreID(1, 2));
+ Oplog oplog = mock(Oplog.class);
+ when(oplog.getCrfFile()).thenReturn(tempDir.newFile("crf"));
+ when(oplog.getDrfFile()).thenReturn(tempDir.newFile("drf"));
+ when(oplog.getKrfFile()).thenReturn(tempDir.newFile("krf"));
+ when(diskStore.getInforFileDirIndex()).thenReturn(1);
+
+ backupDefinition.addOplogFileToBackup(diskStore,
oplog.getCrfFile().toPath());
+ backupDefinition.addOplogFileToBackup(diskStore,
oplog.getDrfFile().toPath());
+ backupDefinition.addOplogFileToBackup(diskStore,
oplog.getKrfFile().toPath());
+
+ executeBackup();
+
+ Path diskStoreDir = targetDir.resolve(DATA_STORES_DIRECTORY)
+ .resolve(GemFireCacheImpl.getDefaultDiskStoreName() + "_1-2");
+ assertThat(diskStoreDir.resolve("dir1").resolve("crf")).exists();
+ assertThat(diskStoreDir.resolve("dir1").resolve("drf")).exists();
+ assertThat(diskStoreDir.resolve("dir1").resolve("krf")).exists();
+ }
+
+ @Test
+ public void diskInitFilesAreBackedUp() throws Exception {
+ DiskStoreImpl diskStore1 = mock(DiskStoreImpl.class);
+ when(diskStore1.getDiskStoreID()).thenReturn(new DiskStoreID(1, 2));
+ when(diskStore1.getInforFileDirIndex()).thenReturn(1);
+ DiskStoreImpl diskStore2 = mock(DiskStoreImpl.class);
+ when(diskStore2.getDiskStoreID()).thenReturn(new DiskStoreID(1, 2));
+ when(diskStore2.getInforFileDirIndex()).thenReturn(2);
+ Path initFile1 = tempDir.newFolder("dir1").toPath().resolve("initFile1");
+ Path initFile2 = tempDir.newFolder("dir2").toPath().resolve("initFile2");
+ Files.createFile(initFile1);
+ Files.createFile(initFile2);
+ backupDefinition.addDiskInitFile(diskStore1, initFile1);
+ backupDefinition.addDiskInitFile(diskStore2, initFile2);
+
+ executeBackup();
+
+ Path diskStoreDir = targetDir.resolve(DATA_STORES_DIRECTORY)
+ .resolve(GemFireCacheImpl.getDefaultDiskStoreName() + "_1-2");
+ assertThat(diskStoreDir.resolve("dir1").resolve("initFile1")).exists();
+ assertThat(diskStoreDir.resolve("dir2").resolve("initFile2")).exists();
+ }
+
+ @Test
+ public void restoreScriptIsBackedUp() throws Exception {
+ Path restoreScriptPath = tempDir.newFile("restoreScript").toPath();
+ when(restoreScript.generate(any())).thenReturn(restoreScriptPath.toFile());
+ backupDefinition.setRestoreScript(restoreScript);
+
+ executeBackup();
+
+ assertThat(targetDir.resolve("restoreScript")).exists();
+ }
+
+ @Test
+ public void backupContainsReadMe() throws IOException {
+ executeBackup();
+
+ assertThat(targetDir.resolve(README_FILE)).exists();
+ }
+
+ @Test
+ public void leavesBehindIncompleteFileOnFailure() throws Exception {
+ Path notCreatedFile =
tempDir.newFolder("dir1").toPath().resolve("notCreated");
+ backupDefinition.addDeployedJarToBackup(notCreatedFile);
+
+ try {
+ executeBackup();
+ } catch (IOException ignore) {
+ // expected to occur on missing file
+ }
+
+ assertThat(targetDir.resolve(INCOMPLETE_BACKUP_FILE)).exists();
+ }
+
+ @Test
+ public void doesNotLeaveBehindIncompleteFileOnSuccess() throws Exception {
+ executeBackup();
+ assertThat(targetDir.resolve(INCOMPLETE_BACKUP_FILE)).doesNotExist();
+ }
+
+ private void executeBackup() throws IOException {
+ BackupDestination backupDestination = new
FileSystemBackupDestination(targetDir);
+ backupDestination.backupFiles(backupDefinition);
+ }
+}
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupFactoryTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupFactoryTest.java
index ce2e7f472b..b91ec19980 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupFactoryTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupFactoryTest.java
@@ -14,8 +14,9 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.assertj.core.api.Assertions.*;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
import java.io.File;
import java.util.HashSet;
@@ -26,7 +27,6 @@
import org.junit.experimental.categories.Category;
import org.apache.geode.CancelCriterion;
-import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.distributed.internal.DM;
import org.apache.geode.distributed.internal.InternalDistributedSystem;
import
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupOperationTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupOperationTest.java
index c0972e8112..dc68f35b59 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupOperationTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FinishBackupOperationTest.java
@@ -14,8 +14,15 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.assertj.core.api.Assertions.*;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskFactoryTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskFactoryTest.java
index cfb62ab8cf..e3245c06c7 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskFactoryTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskFactoryTest.java
@@ -14,10 +14,10 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.assertj.core.api.Assertions.*;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
-import java.io.File;
import java.util.HashSet;
import java.util.Set;
@@ -26,7 +26,6 @@
import org.junit.experimental.categories.Category;
import org.apache.geode.CancelCriterion;
-import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.distributed.internal.DM;
import org.apache.geode.distributed.internal.InternalDistributedSystem;
import
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskOperationTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskOperationTest.java
index a79b43f7ef..7e4364930e 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskOperationTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskOperationTest.java
@@ -14,7 +14,13 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
import java.util.HashSet;
import java.util.Set;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
index 113bb70d50..762920c9e7 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/FlushToDiskRequestTest.java
@@ -14,8 +14,12 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.assertj.core.api.Assertions.*;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
import java.util.HashSet;
import java.util.Set;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
index 69b6478919..d71bb85f49 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/IncrementalBackupDistributedTest.java
@@ -27,6 +27,7 @@
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.util.Collection;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
@@ -37,6 +38,8 @@
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.RegexFileFilter;
+import org.apache.logging.log4j.Logger;
+import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -45,6 +48,8 @@
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
+import org.apache.geode.cache.PartitionAttributes;
+import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionFactory;
import org.apache.geode.cache.RegionShortcut;
@@ -56,7 +61,7 @@
import org.apache.geode.internal.DeployedJar;
import org.apache.geode.internal.cache.DiskStoreImpl;
import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.internal.util.IOUtils;
+import org.apache.geode.internal.logging.LogService;
import org.apache.geode.internal.util.TransformUtils;
import org.apache.geode.management.BackupStatus;
import org.apache.geode.management.ManagementException;
@@ -70,6 +75,7 @@
import org.apache.geode.test.dunit.WaitCriterion;
import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
import org.apache.geode.test.junit.categories.DistributedTest;
+import
org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
/**
* Tests for the incremental backup feature.
@@ -77,6 +83,9 @@
@Category(DistributedTest.class)
@SuppressWarnings("serial")
public class IncrementalBackupDistributedTest extends JUnit4CacheTestCase {
+
+ private static final Logger logger = LogService.getLogger();
+
/**
* Data load increment.
*/
@@ -97,22 +106,37 @@
*/
private static final String OPLOG_REGEX = ".*\\.[kdc]rf$";
+ @Rule
+ public SerializableTemporaryFolder tempDir = new
SerializableTemporaryFolder();
+
+ private final Map<Integer, File> baseDirectoryByVm = new HashMap<>();
+
/**
* Creates test regions for a member.
*/
- private final SerializableRunnable createRegions = new
SerializableRunnable() {
- @Override
- public void run() {
- Cache cache = getCache(new CacheFactory().set(LOG_LEVEL,
LogWriterUtils.getDUnitLogLevel()));
-
cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("fooStore");
-
cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("barStore");
- getRegionFactory(cache).setDiskStoreName("fooStore").create("fooRegion");
- getRegionFactory(cache).setDiskStoreName("barStore").create("barRegion");
- }
- };
+ private void createRegions(File baseDirectory, int vmNumber) throws
IOException {
+ Cache cache = getCache(new CacheFactory().set(LOG_LEVEL,
LogWriterUtils.getDUnitLogLevel()));
+ cache.createDiskStoreFactory().setDiskDirs(getDiskDirectory(baseDirectory,
vmNumber))
+ .create("fooStore");
+ cache.createDiskStoreFactory().setDiskDirs(getDiskDirectory(baseDirectory,
vmNumber))
+ .create("barStore");
+ getRegionFactory(cache).setDiskStoreName("fooStore").create("fooRegion");
+ getRegionFactory(cache).setDiskStoreName("barStore").create("barRegion");
+ }
+
+ private File[] getDiskDirectory(File parent, int vmNumber) throws
IOException {
+ File dir = new File(parent, "disk" +
String.valueOf(vmNumber)).getAbsoluteFile();
+ dir.mkdirs();
+ return new File[] {dir};
+ }
private RegionFactory<Integer, String> getRegionFactory(Cache cache) {
- return cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT);
+ PartitionAttributes<Integer, String> attributes =
+ new PartitionAttributesFactory<Integer,
String>().setTotalNumBuckets(5).create();
+ RegionFactory<Integer, String> factory =
+ cache.<Integer,
String>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
+ .setPartitionAttributes(attributes);
+ return factory;
}
/**
@@ -123,15 +147,6 @@ public void run() {
return file.isDirectory() && file.getName().startsWith("20");
};
- /**
- * Abstracts the logging mechanism.
- *
- * @param message a message to log.
- */
- private void log(String message) {
- LogWriterUtils.getLogWriter().info("[IncrementalBackupDistributedTest] " +
message);
- }
-
/**
* @return the baseline backup directory.
*/
@@ -171,21 +186,6 @@ private static File getIncrementalDir() {
return dir;
}
- /**
- * Returns the directory for a given member.
- *
- * @param vm a distributed system member.
- * @return the disk directories for a member.
- */
- private File getVMDir(VM vm) {
- return (File) vm.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- return IOUtils.tryGetCanonicalFileElseGetAbsoluteFile(new
File(getDiskDirs()[0], "../.."));
- }
- });
- }
-
/**
* Invokes {@link AdminDistributedSystem#getMissingPersistentMembers()} on a
member.
*
@@ -210,16 +210,12 @@ public Object call() {
* @return the status of the backup.
*/
private BackupStatus baseline(VM vm) {
- return (BackupStatus) vm.invoke(new SerializableCallable("Backup all
members.") {
- @Override
- public Object call() {
- try {
- return
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
getBaselineDir(),
- null);
-
- } catch (ManagementException e) {
- throw new RuntimeException(e);
- }
+ return vm.invoke(() -> {
+ try {
+ return
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
getBaselineDir(),
+ null);
+ } catch (ManagementException e) {
+ throw new RuntimeException(e);
}
});
}
@@ -231,16 +227,12 @@ public Object call() {
* @return a status of the backup operation.
*/
private BackupStatus incremental(VM vm) {
- return (BackupStatus) vm.invoke(new SerializableCallable("Backup all
members.") {
- @Override
- public Object call() {
- try {
- return
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
- getIncrementalDir(), getBaselineBackupDir());
-
- } catch (ManagementException e) {
- throw new RuntimeException(e);
- }
+ return vm.invoke(() -> {
+ try {
+ return
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
+ getIncrementalDir(), getBaselineBackupDir());
+ } catch (ManagementException e) {
+ throw new RuntimeException(e);
}
});
}
@@ -252,16 +244,12 @@ public Object call() {
* @return a status of the backup operation.
*/
private BackupStatus incremental2(VM vm) {
- return (BackupStatus) vm.invoke(new SerializableCallable("Backup all
members.") {
- @Override
- public Object call() {
- try {
- return
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
- getIncremental2Dir(), getIncrementalBackupDir());
-
- } catch (ManagementException e) {
- throw new RuntimeException(e);
- }
+ return vm.invoke(() -> {
+ try {
+ return
BackupUtil.backupAllMembers(getSystem().getDistributionManager(),
+ getIncremental2Dir(), getIncrementalBackupDir());
+ } catch (ManagementException e) {
+ throw new RuntimeException(e);
}
});
}
@@ -273,13 +261,8 @@ public Object call() {
* @return the member's id.
*/
private String getMemberId(VM vm) {
- return (String) vm.invoke(new SerializableCallable("getMemberId") {
- @Override
- public Object call() throws Exception {
- return
getCache().getDistributedSystem().getDistributedMember().toString()
- .replaceAll("[^\\w]+", "_");
- }
- });
+ return vm.invoke(() ->
getCache().getDistributedSystem().getDistributedMember().toString()
+ .replaceAll("[^\\w]+", "_"));
}
/**
@@ -334,7 +317,7 @@ private PersistentID getPersistentID(final VM vm) {
private PersistentID disconnect(final VM disconnectVM, final VM testVM) {
final PersistentID id = disconnectVM.invoke(() -> {
PersistentID persistentID = null;
- Collection<DiskStore> diskStores = ((InternalCache)
getCache()).listDiskStores();
+ Collection<DiskStore> diskStores = getCache().listDiskStores();
for (DiskStore diskStore : diskStores) {
if (diskStore.getName().equals("fooStore")) {
persistentID = ((DiskStoreImpl) diskStore).getPersistentID();
@@ -371,8 +354,19 @@ public String description() {
*
* @param vm a member of the distributed system.
*/
- private void openCache(VM vm) {
- vm.invoke(this.createRegions);
+ private void openCache(VM vm) throws IOException {
+ int vmNumber = vm.getId();
+ File vmDir = getBaseDir(vmNumber);
+ vm.invoke(() -> createRegions(vmDir, vmNumber));
+ }
+
+ private File getBaseDir(int vmNumber) throws IOException {
+ File baseDir = baseDirectoryByVm.get(vmNumber);
+ if (baseDir == null) {
+ baseDir = tempDir.newFolder("vm" + vmNumber);
+ baseDirectoryByVm.put(vmNumber, baseDir);
+ }
+ return baseDir;
}
/**
@@ -442,15 +436,6 @@ private static File getIncrementalBackupDir() {
return dirs[0];
}
- /**
- * @return the directory for the completed baseline backup.
- */
- private static File getIncremental2BackupDir() {
- File[] dirs = getIncremental2Dir().listFiles(backupDirFilter);
- assertEquals(1, dirs.length);
- return dirs[0];
- }
-
/**
* Returns an individual member's backup directory.
*
@@ -478,7 +463,7 @@ public boolean accept(File file) {
* Adds the data region to every participating VM.
*/
@SuppressWarnings("serial")
- private void createDataRegions() {
+ private void createDataRegions() throws IOException {
Host host = Host.getHost(0);
int numberOfVms = host.getVMCount();
@@ -512,12 +497,11 @@ public void run() {
do {
line = reader.readLine();
- log(line);
} while (null != line);
reader.close();
} catch (IOException e) {
- log("Execute: error while reading standard in: " + e.getMessage());
+ logger.info(e);
}
}
}).start();
@@ -539,7 +523,7 @@ public void run() {
reader.close();
} catch (IOException e) {
- log("Execute: error while reading standard error: " +
e.getMessage());
+ logger.info(e);
}
}
}).start();
@@ -644,10 +628,9 @@ private void assertBackupStatus(final BackupStatus
backupStatus) {
@Override
public final void postSetUp() throws Exception {
createDataRegions();
- this.createRegions.run();
+ File dir = getBaseDir(-1);
+ createRegions(dir, -1);
loadMoreData();
-
- log("Data region created and populated.");
}
/**
@@ -670,7 +653,8 @@ public final void preTearDownCacheTestCase() throws
Exception {
public void testIncrementalBackup() throws Exception {
String memberId = getMemberId(Host.getHost(0).getVM(1));
- File memberDir = getVMDir(Host.getHost(0).getVM(1));
+ File memberDir = baseDirectoryByVm.get(1);
+ // getVMDir(Host.getHost(0).getVM(1));
assertNotNull(memberDir);
// Find all of the member's oplogs in the disk directory
(*.crf,*.krf,*.drf)
@@ -707,9 +691,6 @@ public void testIncrementalBackup() throws Exception {
TransformUtils.transform(memberIncrementalOplogs,
memberIncrementalOplogNames,
TransformUtils.fileNameTransformer);
- log("BASELINE OPLOGS = " + memberBaselineOplogNames);
- log("INCREMENTAL OPLOGS = " + memberIncrementalOplogNames);
-
/*
* Assert that the incremental backup does not contain baseline operation
logs that the member
* still has copies of.
@@ -732,8 +713,6 @@ public void testIncrementalBackup() throws Exception {
TransformUtils.transform(memberIncremental2Oplogs,
memberIncremental2OplogNames,
TransformUtils.fileNameTransformer);
- log("INCREMENTAL 2 OPLOGS = " + memberIncremental2OplogNames);
-
/*
* Assert that the second incremental backup does not contain operation
logs copied into the
* baseline.
@@ -1089,16 +1068,12 @@ public void testBackupUserDeployedJarFiles() throws
Exception {
/*
* Remove the "dummy" jar from the VM.
*/
- vm0.invoke(new SerializableCallable() {
- @Override
- public Object call() throws Exception {
- for (DeployedJar jarClassLoader :
ClassPathLoader.getLatest().getJarDeployer()
- .findDeployedJars()) {
- if (jarClassLoader.getJarName().startsWith(jarName)) {
-
ClassPathLoader.getLatest().getJarDeployer().undeploy(jarClassLoader.getJarName());
- }
+ vm0.invoke(() -> {
+ for (DeployedJar jarClassLoader :
ClassPathLoader.getLatest().getJarDeployer()
+ .findDeployedJars()) {
+ if (jarClassLoader.getJarName().startsWith(jarName)) {
+
ClassPathLoader.getLatest().getJarDeployer().undeploy(jarClassLoader.getJarName());
}
- return null;
}
});
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
index 41431a35b2..3cf89cd709 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupFactoryTest.java
@@ -14,8 +14,9 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.assertj.core.api.Assertions.*;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
import java.util.HashSet;
import java.util.Set;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
index 2e4246aeba..71dd0c136c 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupOperationTest.java
@@ -14,8 +14,15 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.assertj.core.api.Assertions.*;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.HashSet;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
index f1700bc185..dd0e643c8f 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/backup/PrepareBackupRequestTest.java
@@ -14,8 +14,12 @@
*/
package org.apache.geode.internal.cache.backup;
-import static org.assertj.core.api.Assertions.*;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.HashSet;
diff --git
a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
index d7c7063256..7a60eaf0f1 100644
---
a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
+++
b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
@@ -2138,144 +2138,6 @@ public Object call() throws Exception {
checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
}
- /**
- * Test what happens when we restart persistent members while there is an
accessor concurrently
- * performing puts. This is for bug 43899
- */
- @Test
- public void testRecoverySystemWithConcurrentPutter() throws Throwable {
- Host host = Host.getHost(0);
- VM vm0 = host.getVM(0);
- VM vm1 = host.getVM(1);
- VM vm2 = host.getVM(2);
- VM vm3 = host.getVM(3);
-
- // Define all of the runnables used in this test
-
- // runnable to create accessors
- SerializableRunnable createAccessor = new
SerializableRunnable("createAccessor") {
- public void run() {
- Cache cache = getCache();
-
- AttributesFactory af = new AttributesFactory();
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setRedundantCopies(1);
- paf.setLocalMaxMemory(0);
- af.setPartitionAttributes(paf.create());
- af.setDataPolicy(DataPolicy.PARTITION);
- cache.createRegion(getPartitionedRegionName(), af.create());
-
- paf.setColocatedWith(getPartitionedRegionName());
- af.setPartitionAttributes(paf.create());
- cache.createRegion("region2", af.create());
- }
- };
-
- // runnable to create PRs
- SerializableRunnable createPRs = new SerializableRunnable("createPRs") {
- public void run() {
- Cache cache = getCache();
-
- DiskStore ds = cache.findDiskStore("disk");
- if (ds == null) {
- ds =
cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
- }
- AttributesFactory af = new AttributesFactory();
- PartitionAttributesFactory paf = new PartitionAttributesFactory();
- paf.setRedundantCopies(1);
- af.setPartitionAttributes(paf.create());
- af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
- af.setDiskStoreName("disk");
- cache.createRegion(getPartitionedRegionName(), af.create());
-
- paf.setColocatedWith(getPartitionedRegionName());
- af.setPartitionAttributes(paf.create());
- cache.createRegion("region2", af.create());
- }
- };
-
- // runnable to close the cache.
- SerializableRunnable closeCache = new SerializableRunnable("closeCache") {
- public void run() {
- closeCache();
- }
- };
-
- // Runnable to do a bunch of puts handle exceptions
- // due to the fact that member is offline.
- SerializableRunnable doABunchOfPuts = new
SerializableRunnable("doABunchOfPuts") {
- public void run() {
- Cache cache = getCache();
- Region region = cache.getRegion(getPartitionedRegionName());
- try {
- for (int i = 0;; i++) {
- try {
- region.get(i % NUM_BUCKETS);
- } catch (PartitionOfflineException expected) {
- // do nothing.
- } catch (PartitionedRegionStorageException expected) {
- // do nothing.
- }
- Thread.yield();
- }
- } catch (CacheClosedException expected) {
- // ok, we're done.
- }
- }
- };
-
-
- // Runnable to clean up disk dirs on a members
- SerializableRunnable cleanDiskDirs = new SerializableRunnable("Clean disk
dirs") {
- public void run() {
- cleanDiskDirs();
- }
- };
-
- // Create the PR two members
- vm1.invoke(createPRs);
- vm2.invoke(createPRs);
-
- // create the accessor.
- vm0.invoke(createAccessor);
-
-
- // Create some buckets.
- createData(vm0, 0, NUM_BUCKETS, "a");
- createData(vm0, 0, NUM_BUCKETS, "a", "region2");
-
-
- // backup the system. We use this to get a snapshot of vm1 and vm2
- // when they both are online. Recovering from this backup simulates
- // a simulataneous kill and recovery.
- backup(vm3);
-
- // close vm1 and vm2.
- vm1.invoke(closeCache);
- vm2.invoke(closeCache);
-
- // restore the backup
- vm1.invoke(cleanDiskDirs);
- vm2.invoke(cleanDiskDirs);
- restoreBackup(2);
-
- // in vm0, start doing a bunch of concurrent puts.
- AsyncInvocation async0 = vm0.invokeAsync(doABunchOfPuts);
-
- // This recovery should not hang (that's what we're testing for
- // here.
- AsyncInvocation async1 = vm1.invokeAsync(createPRs);
- AsyncInvocation async2 = vm2.invokeAsync(createPRs);
- async1.getResult(MAX_WAIT);
- async2.getResult(MAX_WAIT);
-
- // close the cache in vm0 to stop the async puts.
- vm0.invoke(closeCache);
-
- // make sure we didn't get an exception
- async0.getResult(MAX_WAIT);
- }
-
@Category(FlakyTest.class) // GEODE-506: time sensitive, async actions with
30 sec max
@Test
public void testRebalanceWithOfflineChildRegion() throws Throwable {
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
> Create plugin system for specifying where a backup is stored
> ------------------------------------------------------------
>
> Key: GEODE-3799
> URL: https://issues.apache.org/jira/browse/GEODE-3799
> Project: Geode
> Issue Type: Sub-task
> Components: persistence
> Reporter: Nick Reich
> Assignee: Nick Reich
>
> The current logic merges the moving/copying of files with the determining of
> what to backup. To make it possible to store a backup in cloud storage or
> other locations, we need to separate these concerns, putting the variable,
> location-based logic, into a plugin architecture.
--
This message was sent by Atlassian JIRA
(v6.4.14#64029)