This is an automated email from the ASF dual-hosted git repository.

mivanac pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/develop by this push:
     new e4d2f16c5d GEODE-10419: Enhancement of backup disk-store command (#7851)
e4d2f16c5d is described below

commit e4d2f16c5dad27a96f39a4bec695572040bcedb7
Author: Mario Ivanac <48509724+miva...@users.noreply.github.com>
AuthorDate: Wed Sep 14 16:39:18 2022 +0200

    GEODE-10419: Enhancement of backup disk-store command (#7851)
    
    * GEODE-10419: initial commit
    
    * GEODE-10419: documentation impacts
    
    * GEODE-10419: added DT
---
 .../DistributedSystemBridgeIntegrationTest.java    |   4 +-
 .../InternalConfigurationPersistenceService.java   |   2 +-
 .../internal/cache/backup/BackupConfigFactory.java |  10 +
 .../internal/cache/backup/BackupOperation.java     |   8 +
 .../geode/internal/cache/backup/BackupService.java |   8 +-
 .../geode/internal/cache/backup/BackupTask.java    |  36 ++-
 .../cache/backup/FileSystemBackupWriterConfig.java |   1 +
 .../geode/internal/cache/backup/PrepareBackup.java |   8 +-
 .../cache/backup/PrepareBackupFactory.java         |   5 +-
 .../geode/management/internal/i18n/CliStrings.java |   8 +
 .../gfsh/command-pages/backup.html.md.erb          |  10 +-
 .../commands/BackupDiskStoreCommandDUnitTest.java  | 273 +++++++++++++++++++++
 .../cli/commands/BackupDiskStoreCommand.java       |  40 ++-
 13 files changed, 399 insertions(+), 14 deletions(-)

diff --git 
a/geode-core/src/integrationTest/java/org/apache/geode/management/internal/beans/DistributedSystemBridgeIntegrationTest.java
 
b/geode-core/src/integrationTest/java/org/apache/geode/management/internal/beans/DistributedSystemBridgeIntegrationTest.java
index 9c184c843f..6f53fa411a 100644
--- 
a/geode-core/src/integrationTest/java/org/apache/geode/management/internal/beans/DistributedSystemBridgeIntegrationTest.java
+++ 
b/geode-core/src/integrationTest/java/org/apache/geode/management/internal/beans/DistributedSystemBridgeIntegrationTest.java
@@ -19,6 +19,7 @@ import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.ArgumentMatchers.isA;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.inOrder;
@@ -68,7 +69,6 @@ public class DistributedSystemBridgeIntegrationTest {
     backupService = mock(BackupService.class);
     when(cache.getBackupService()).thenReturn(backupService);
     when(cache.getPersistentMemberManager()).thenReturn(memberManager);
-    when(cache.getBackupService()).thenReturn(backupService);
 
     DLockService dlock = mock(DLockService.class);
     when(dlock.lock(any(), anyLong(), anyLong())).thenReturn(true);
@@ -114,7 +114,7 @@ public class DistributedSystemBridgeIntegrationTest {
 
     InOrder inOrder = inOrder(dm, backupService);
     inOrder.verify(dm).putOutgoing(isA(PrepareBackupRequest.class));
-    inOrder.verify(backupService).prepareBackup(any(), any());
+    inOrder.verify(backupService).prepareBackup(any(), any(), eq(null));
     inOrder.verify(dm).putOutgoing(isA(FinishBackupRequest.class));
     inOrder.verify(backupService).doBackup();
   }
diff --git 
a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalConfigurationPersistenceService.java
 
b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalConfigurationPersistenceService.java
index f59591fe7f..11116d68af 100644
--- 
a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalConfigurationPersistenceService.java
+++ 
b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalConfigurationPersistenceService.java
@@ -95,7 +95,7 @@ public class InternalConfigurationPersistenceService 
implements ConfigurationPer
    */
   public static final String CLUSTER_CONFIG_ARTIFACTS_DIR_NAME = 
"cluster_config";
 
-  private static final String CLUSTER_CONFIG_DISK_STORE_NAME = 
"cluster_config";
+  public static final String CLUSTER_CONFIG_DISK_STORE_NAME = "cluster_config";
 
   public static final String CLUSTER_CONFIG_DISK_DIR_PREFIX = "ConfigDiskDir_";
 
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupConfigFactory.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupConfigFactory.java
index e77f5ece96..f04d06d5ef 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupConfigFactory.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupConfigFactory.java
@@ -17,6 +17,7 @@ package org.apache.geode.internal.cache.backup;
 import static 
org.apache.geode.internal.cache.backup.AbstractBackupWriterConfig.TIMESTAMP;
 import static 
org.apache.geode.internal.cache.backup.AbstractBackupWriterConfig.TYPE;
 import static 
org.apache.geode.internal.cache.backup.FileSystemBackupWriterConfig.BASELINE_DIR;
+import static 
org.apache.geode.internal.cache.backup.FileSystemBackupWriterConfig.INCLUDE_DISK_STORES;
 import static 
org.apache.geode.internal.cache.backup.FileSystemBackupWriterConfig.TARGET_DIR;
 
 import java.text.SimpleDateFormat;
@@ -27,6 +28,7 @@ class BackupConfigFactory {
 
   private String targetDirPath;
   private String baselineDirPath;
+  private String includeDiskStores;
 
   BackupConfigFactory() {
     // nothing
@@ -42,6 +44,11 @@ class BackupConfigFactory {
     return this;
   }
 
+  BackupConfigFactory withIncludeDiskStores(String includeDiskStores) {
+    this.includeDiskStores = includeDiskStores;
+    return this;
+  }
+
   Properties createBackupProperties() {
     Properties properties = new Properties();
     SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
@@ -51,6 +58,9 @@ class BackupConfigFactory {
     if (baselineDirPath != null) {
       properties.setProperty(BASELINE_DIR, baselineDirPath);
     }
+    if (includeDiskStores != null) {
+      properties.setProperty(INCLUDE_DISK_STORES, includeDiskStores);
+    }
     return properties;
   }
 }
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupOperation.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupOperation.java
index 3084c0ed69..29b10decac 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupOperation.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupOperation.java
@@ -71,6 +71,14 @@ public class BackupOperation {
     return performBackup(properties);
   }
 
+  public BackupStatus backupAllMembers(String targetDirPath, String 
baselineDirPath,
+      String includeDiskStores) {
+    Properties properties = new 
BackupConfigFactory().withTargetDirPath(targetDirPath)
+        
.withBaselineDirPath(baselineDirPath).withIncludeDiskStores(includeDiskStores)
+        .createBackupProperties();
+    return performBackup(properties);
+  }
+
   private BackupStatus performBackup(Properties properties) throws 
ManagementException {
     if (backupLockService.obtainLock(dm)) {
       try {
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupService.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupService.java
index 8f0139dee3..d25143cb43 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupService.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupService.java
@@ -54,8 +54,14 @@ public class BackupService {
 
   public HashSet<PersistentID> prepareBackup(InternalDistributedMember sender, 
BackupWriter writer)
       throws IOException, InterruptedException {
+    return prepareBackup(sender, writer, null);
+  }
+
+  public HashSet<PersistentID> prepareBackup(InternalDistributedMember sender, 
BackupWriter writer,
+      String includeDiskStores)
+      throws IOException, InterruptedException {
     validateRequestingSender(sender);
-    BackupTask backupTask = new BackupTask(cache, writer);
+    BackupTask backupTask = new BackupTask(cache, writer, includeDiskStores);
     if (!currentTask.compareAndSet(null, backupTask)) {
       throw new IOException("Another backup is already in progress");
     }
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupTask.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupTask.java
index 8124dee40d..6695f4ef2c 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupTask.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupTask.java
@@ -14,13 +14,19 @@
  */
 package org.apache.geode.internal.cache.backup;
 
+import static 
org.apache.geode.distributed.internal.InternalConfigurationPersistenceService.CLUSTER_CONFIG_DISK_STORE_NAME;
+
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
+import java.util.stream.Collectors;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.InternalGemFireError;
@@ -37,7 +43,6 @@ import org.apache.geode.logging.internal.log4j.api.LogService;
  */
 class BackupTask {
   private static final Logger logger = LogService.getLogger();
-
   private final Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new 
HashMap<>();
   private final RestoreScript restoreScript = new RestoreScript();
   private final InternalCache cache;
@@ -46,15 +51,25 @@ class BackupTask {
   private final CountDownLatch otherMembersReady = new CountDownLatch(1);
   private final HashSet<PersistentID> diskStoresWithData = new HashSet<>();
   private final BackupWriter backupWriter;
+  private final Set<String> includeDiskStoresSet = new HashSet<>();
 
   private volatile boolean isCancelled;
 
   private TemporaryBackupFiles temporaryFiles;
   private BackupFileCopier fileCopier;
 
-  BackupTask(InternalCache cache, BackupWriter backupWriter) {
+  BackupTask(InternalCache cache, BackupWriter backupWriter, String 
includeDiskStores) {
     this.cache = cache;
     this.backupWriter = backupWriter;
+    if (includeDiskStores != null) {
+      
this.includeDiskStoresSet.addAll(Arrays.stream(includeDiskStores.split(","))
+          .filter(StringUtils::isNotBlank)
+          .collect(Collectors.toSet()));
+      if (!this.includeDiskStoresSet.isEmpty()) {
+        // add internal disk-store for shared configuration data
+        this.includeDiskStoresSet.add(CLUSTER_CONFIG_DISK_STORE_NAME);
+      }
+    }
   }
 
   HashSet<PersistentID> getPreparedDiskStores() throws InterruptedException {
@@ -86,7 +101,9 @@ class BackupTask {
   private void prepareForBackup() {
     for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
       DiskStoreImpl storeImpl = (DiskStoreImpl) store;
-
+      if (!isDiskStoreIncluded(store)) {
+        continue;
+      }
       storeImpl.lockStoreBeforeBackup();
       if (logger.isDebugEnabled()) {
         logger.debug("Acquired lock for backup on disk store {}", 
store.getName());
@@ -145,6 +162,9 @@ class BackupTask {
     Map<DiskStoreImpl, DiskStoreBackup> backupByDiskStore = new HashMap<>();
 
     for (DiskStore store : diskStores) {
+      if (!isDiskStoreIncluded(store)) {
+        continue;
+      }
       DiskStoreImpl diskStore = (DiskStoreImpl) store;
       try {
         if (diskStore.hasPersistedData()) {
@@ -161,6 +181,16 @@ class BackupTask {
     return backupByDiskStore;
   }
 
+  boolean isDiskStoreIncluded(DiskStore store) {
+    if (includeDiskStoresSet.isEmpty()) {
+      return true;
+    }
+    if (includeDiskStoresSet.contains(store.getName())) {
+      return true;
+    }
+    return false;
+  }
+
   void abort() {
     isCancelled = true;
     otherMembersReady.countDown();
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupWriterConfig.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupWriterConfig.java
index 02ef4b12b2..700fdfb996 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupWriterConfig.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupWriterConfig.java
@@ -22,6 +22,7 @@ class FileSystemBackupWriterConfig extends 
AbstractBackupWriterConfig {
 
   static final String TARGET_DIR = "TARGET_DIRECTORY";
   static final String BASELINE_DIR = "BASELINE_DIRECTORY";
+  static final String INCLUDE_DISK_STORES = "INCLUDE_DISK_STORES";
 
   FileSystemBackupWriterConfig(Properties properties) {
     super(properties);
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackup.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackup.java
index 24e1b72800..0d11a4ae35 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackup.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackup.java
@@ -26,11 +26,14 @@ class PrepareBackup {
   private final InternalDistributedMember member;
   private final InternalCache cache;
   private final BackupWriter backupWriter;
+  private final String includeDiskStores;
 
-  PrepareBackup(InternalDistributedMember member, InternalCache cache, 
BackupWriter backupWriter) {
+  PrepareBackup(InternalDistributedMember member, InternalCache cache, 
BackupWriter backupWriter,
+      String includeDiskStores) {
     this.member = member;
     this.cache = cache;
     this.backupWriter = backupWriter;
+    this.includeDiskStores = includeDiskStores;
   }
 
   HashSet<PersistentID> run() throws IOException, InterruptedException {
@@ -38,7 +41,8 @@ class PrepareBackup {
     if (cache == null) {
       persistentIds = new HashSet<>();
     } else {
-      persistentIds = cache.getBackupService().prepareBackup(member, 
backupWriter);
+      persistentIds =
+          cache.getBackupService().prepareBackup(member, backupWriter, 
includeDiskStores);
     }
     return persistentIds;
   }
diff --git 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupFactory.java
 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupFactory.java
index 757aebcf51..b79abaa607 100644
--- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupFactory.java
+++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/PrepareBackupFactory.java
@@ -15,6 +15,7 @@
 package org.apache.geode.internal.cache.backup;
 
 import static 
org.apache.geode.internal.cache.backup.AbstractBackupWriterConfig.TYPE;
+import static 
org.apache.geode.internal.cache.backup.FileSystemBackupWriterConfig.INCLUDE_DISK_STORES;
 
 import java.util.HashSet;
 import java.util.Properties;
@@ -42,7 +43,9 @@ class PrepareBackupFactory {
     String memberId = cleanSpecialCharacters(member.toString());
     BackupWriter backupWriter = 
BackupWriterFactory.getFactoryForType(properties.getProperty(TYPE))
         .createWriter(properties, memberId);
-    return new PrepareBackup(member, cache, backupWriter);
+
+    String includeDiskStores = properties.getProperty(INCLUDE_DISK_STORES);
+    return new PrepareBackup(member, cache, backupWriter, includeDiskStores);
   }
 
   BackupResponse createBackupResponse(InternalDistributedMember sender,
diff --git 
a/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
 
b/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
index 26d5ed7da2..f533c77960 100644
--- 
a/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
+++ 
b/geode-core/src/main/java/org/apache/geode/management/internal/i18n/CliStrings.java
@@ -531,6 +531,14 @@ public class CliStrings {
   public static final String BACKUP_DISK_STORE_MSG_NO_DISKSTORES_BACKED_UP =
       "No disk store(s) were backed up.";
 
+  public static final String BACKUP_INCLUDE_DISK_STORES = 
"include-disk-stores";
+  public static final String BACKUP_INCLUDE_DISK_STORES__HELP = "List of 
disk-stores to include.";
+  public static final String 
BACKUP_DISK_STORE__MSG__SPECIFY_VALID_INCLUDE_DISKSTORE_UNKNOWN_DISKSTORE_0 =
+      "Specify valid include-disk-stores. Unknown Disk Store : \"{0}\".";
+
+  public static final String 
BACKUP_DISK_STORE__MSG__SPECIFY_VALID_INCLUDE_DISKSTORE_UNKNOWN_DISKSTORE_1 =
+      "Specify valid include-disk-stores. Blank name added in list of 
disk-stores";
+
   /* 'compact disk-store' command */
   public static final String COMPACT_DISK_STORE = "compact disk-store";
   public static final String COMPACT_DISK_STORE__HELP =
diff --git a/geode-docs/tools_modules/gfsh/command-pages/backup.html.md.erb 
b/geode-docs/tools_modules/gfsh/command-pages/backup.html.md.erb
index d3832a842d..8c1b099499 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/backup.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/backup.html.md.erb
@@ -32,7 +32,7 @@ You can also use this command to perform an incremental 
backup. See [Creating Ba
 **Syntax:**
 
 ``` pre
-backup disk-store --dir=value [--baseline-dir=value]
+backup disk-store --dir=value [--baseline-dir=value] 
[--include-disk-stores=value(,value)*]
 ```
 
 <a 
id="topic_E74ED23CB60342538B2175C326E7D758__table_2277A2CE8F6E4731B45FEFA2B1366DB6"></a>
@@ -59,6 +59,11 @@ backup disk-store --dir=value [--baseline-dir=value]
 <td>Directory that contains the baseline backup used for comparison during an 
incremental backup.
 <p>An incremental backup operation backs up any data that is not present in 
the directory specified in <span class="keyword 
parmname">&#8209;&#8209;baseline-dir</span>. If the member cannot find 
previously backed up data or if the previously backed up data is corrupt, the 
command performs a full backup on that member.</p></td>
 </tr>
+<tr>
+<td><span class="keyword 
parmname">&#8209;&#8209;include-disk-stores</span></td>
+<td>List of disk-stores to include in backup.
+<p>Selective backup of disk-stores listed in <span class="keyword 
parmname">&#8209;&#8209;include-disk-stores</span>. The specified disk-stores 
must exist in the system. If this parameter is not specified, all disk-stores 
are included in the backup.</p></td>
+</tr>
 </tbody>
 </table>
 
@@ -70,6 +75,9 @@ backup disk-store --dir=value [--baseline-dir=value]
 backup disk-store --dir=data/backups
 
 backup disk-store --dir=data/backup/disk-store 
--baselineDir=data/backups/2012-09-24-17-08-50
+
+backup disk-store --dir=data/backups --include-disk-stores=data
+
 ```
 
 **Sample Output:**
diff --git 
a/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommandDUnitTest.java
 
b/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommandDUnitTest.java
new file mode 100644
index 0000000000..3bbeac66ec
--- /dev/null
+++ 
b/geode-gfsh/src/distributedTest/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommandDUnitTest.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+
+package org.apache.geode.management.internal.cli.commands;
+
+
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.List;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.management.internal.cli.result.model.ResultModel;
+import 
org.apache.geode.management.internal.cli.result.model.TabularResultModel;
+import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
+import org.apache.geode.management.internal.i18n.CliStrings;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.categories.PersistenceTest;
+import org.apache.geode.test.junit.rules.GfshCommandRule;
+
+@Category({PersistenceTest.class})
+public class BackupDiskStoreCommandDUnitTest {
+
+  private MemberVM locator;
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Rule
+  public ClusterStartupRule lsRule = new ClusterStartupRule();
+
+  @Rule
+  public GfshCommandRule gfshConnector = new GfshCommandRule();
+
+  @Before
+  public void before() throws Exception {
+    locator = lsRule.startLocatorVM(0);
+    gfshConnector.connect(locator);
+    assertThat(gfshConnector.isConnected()).isTrue();
+
+    // start a server so that we can execute data commands that requires at 
least a server running
+  }
+
+  @Test
+  public void backupDiskStoresOfOneDiskStore() {
+    MemberVM server1 = lsRule.startServerVM(1, locator.getPort());
+    @SuppressWarnings("unused")
+    MemberVM server2 = lsRule.startServerVM(2, locator.getPort());
+    @SuppressWarnings("unused")
+
+    final String testRegionName = "regionA";
+    CommandStringBuilder csb;
+    csb = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE)
+        .addOption(CliStrings.CREATE_DISK_STORE__NAME, "diskStore")
+        .addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, 
"diskStoreDir");
+    
gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsSuccess();
+
+    CommandStringBuilder createRegion = new 
CommandStringBuilder(CliStrings.CREATE_REGION)
+        .addOption(CliStrings.CREATE_REGION__REGION, testRegionName)
+        .addOption(CliStrings.CREATE_REGION__DISKSTORE, "diskStore")
+        .addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT,
+            RegionShortcut.PARTITION_PERSISTENT.toString());
+    await().untilAsserted(() -> 
gfshConnector.executeAndAssertThat(createRegion.getCommandString())
+        .statusIsSuccess());
+
+    // Add data to the region
+    addData(server1, testRegionName);
+
+    csb = new CommandStringBuilder(CliStrings.BACKUP_DISK_STORE)
+        .addOption(CliStrings.BACKUP_DISK_STORE__DISKDIRS, "backupDir");
+
+    @SuppressWarnings("deprecation")
+    ResultModel result =
+        gfshConnector.executeCommand(csb.getCommandString()).getResultData();
+    TabularResultModel tableSection = 
result.getTableSection("backed-up-diskstores");
+    List<String> list = tableSection.getValuesInColumn("UUID");
+    assertThat(list).hasSize(3);
+  }
+
+  @Test
+  public void backupDiskStoresTwoDiskStores() {
+    MemberVM server1 = lsRule.startServerVM(1, locator.getPort());
+    @SuppressWarnings("unused")
+    MemberVM server2 = lsRule.startServerVM(2, locator.getPort());
+    @SuppressWarnings("unused")
+
+    final String testRegionName = "regionA";
+    final String testRegionName2 = "regionB";
+
+    CommandStringBuilder csb;
+    csb = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE)
+        .addOption(CliStrings.CREATE_DISK_STORE__NAME, "diskStore")
+        .addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, 
"diskStoreDir");
+    
gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsSuccess();
+
+    CommandStringBuilder createRegion = new 
CommandStringBuilder(CliStrings.CREATE_REGION)
+        .addOption(CliStrings.CREATE_REGION__REGION, testRegionName)
+        .addOption(CliStrings.CREATE_REGION__DISKSTORE, "diskStore")
+        .addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT,
+            RegionShortcut.PARTITION_PERSISTENT.toString());
+    await().untilAsserted(() -> 
gfshConnector.executeAndAssertThat(createRegion.getCommandString())
+        .statusIsSuccess());
+
+
+    csb = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE)
+        .addOption(CliStrings.CREATE_DISK_STORE__NAME, "diskStore2")
+        .addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, 
"diskStoreDir2");
+    
gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsSuccess();
+
+    CommandStringBuilder createRegion2 = new 
CommandStringBuilder(CliStrings.CREATE_REGION)
+        .addOption(CliStrings.CREATE_REGION__REGION, testRegionName2)
+        .addOption(CliStrings.CREATE_REGION__DISKSTORE, "diskStore2")
+        .addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT,
+            RegionShortcut.PARTITION_PERSISTENT.toString());
+    await().untilAsserted(() -> 
gfshConnector.executeAndAssertThat(createRegion2.getCommandString())
+        .statusIsSuccess());
+
+    // Add data to the region
+    addData(server1, testRegionName);
+    addData(server2, testRegionName2);
+
+    csb = new CommandStringBuilder(CliStrings.BACKUP_DISK_STORE)
+        .addOption(CliStrings.BACKUP_DISK_STORE__DISKDIRS, "backupDir");
+
+    @SuppressWarnings("deprecation")
+    ResultModel result =
+        gfshConnector.executeCommand(csb.getCommandString()).getResultData();
+    TabularResultModel tableSection = 
result.getTableSection("backed-up-diskstores");
+    List<String> list = tableSection.getValuesInColumn("UUID");
+    assertThat(list).hasSize(5);
+  }
+
+  @Test
+  public void backupDiskStoresTwoDiskStoresIncludeJustOne() {
+    MemberVM server1 = lsRule.startServerVM(1, locator.getPort());
+    @SuppressWarnings("unused")
+    MemberVM server2 = lsRule.startServerVM(2, locator.getPort());
+    @SuppressWarnings("unused")
+
+    final String testRegionName = "regionA";
+    final String testRegionName2 = "regionB";
+
+    CommandStringBuilder csb;
+    csb = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE)
+        .addOption(CliStrings.CREATE_DISK_STORE__NAME, "diskStore")
+        .addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, 
"diskStoreDir");
+    
gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsSuccess();
+
+    CommandStringBuilder createRegion = new 
CommandStringBuilder(CliStrings.CREATE_REGION)
+        .addOption(CliStrings.CREATE_REGION__REGION, testRegionName)
+        .addOption(CliStrings.CREATE_REGION__DISKSTORE, "diskStore")
+        .addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT,
+            RegionShortcut.PARTITION_PERSISTENT.toString());
+    await().untilAsserted(() -> 
gfshConnector.executeAndAssertThat(createRegion.getCommandString())
+        .statusIsSuccess());
+
+
+    csb = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE)
+        .addOption(CliStrings.CREATE_DISK_STORE__NAME, "diskStore2")
+        .addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, 
"diskStoreDir2");
+    
gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsSuccess();
+
+    CommandStringBuilder createRegion2 = new 
CommandStringBuilder(CliStrings.CREATE_REGION)
+        .addOption(CliStrings.CREATE_REGION__REGION, testRegionName2)
+        .addOption(CliStrings.CREATE_REGION__DISKSTORE, "diskStore2")
+        .addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT,
+            RegionShortcut.PARTITION_PERSISTENT.toString());
+    await().untilAsserted(() -> 
gfshConnector.executeAndAssertThat(createRegion2.getCommandString())
+        .statusIsSuccess());
+
+    // Add data to the region
+    addData(server1, testRegionName);
+    addData(server2, testRegionName2);
+
+    csb = new CommandStringBuilder(CliStrings.BACKUP_DISK_STORE)
+        .addOption(CliStrings.BACKUP_DISK_STORE__DISKDIRS, "backupDir")
+        .addOption(CliStrings.BACKUP_INCLUDE_DISK_STORES, "diskStore");
+
+    @SuppressWarnings("deprecation")
+    ResultModel result =
+        gfshConnector.executeCommand(csb.getCommandString()).getResultData();
+    TabularResultModel tableSection = 
result.getTableSection("backed-up-diskstores");
+    List<String> list = tableSection.getValuesInColumn("UUID");
+    assertThat(list).hasSize(3);
+  }
+
+  @Test
+  public void backupDiskStoresInvalidIncludeDiskStores() {
+    MemberVM server1 = lsRule.startServerVM(1, locator.getPort());
+    @SuppressWarnings("unused")
+    MemberVM server2 = lsRule.startServerVM(2, locator.getPort());
+    @SuppressWarnings("unused")
+
+    final String testRegionName = "regionA";
+    final String testRegionName2 = "regionB";
+
+    CommandStringBuilder csb;
+    csb = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE)
+        .addOption(CliStrings.CREATE_DISK_STORE__NAME, "diskStore")
+        .addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, 
"diskStoreDir");
+    
gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsSuccess();
+
+    CommandStringBuilder createRegion = new 
CommandStringBuilder(CliStrings.CREATE_REGION)
+        .addOption(CliStrings.CREATE_REGION__REGION, testRegionName)
+        .addOption(CliStrings.CREATE_REGION__DISKSTORE, "diskStore")
+        .addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT,
+            RegionShortcut.PARTITION_PERSISTENT.toString());
+    await().untilAsserted(() -> 
gfshConnector.executeAndAssertThat(createRegion.getCommandString())
+        .statusIsSuccess());
+
+
+    csb = new CommandStringBuilder(CliStrings.CREATE_DISK_STORE)
+        .addOption(CliStrings.CREATE_DISK_STORE__NAME, "diskStore2")
+        .addOption(CliStrings.CREATE_DISK_STORE__DIRECTORY_AND_SIZE, 
"diskStoreDir2");
+    
gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsSuccess();
+
+    CommandStringBuilder createRegion2 = new 
CommandStringBuilder(CliStrings.CREATE_REGION)
+        .addOption(CliStrings.CREATE_REGION__REGION, testRegionName2)
+        .addOption(CliStrings.CREATE_REGION__DISKSTORE, "diskStore2")
+        .addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT,
+            RegionShortcut.PARTITION_PERSISTENT.toString());
+    await().untilAsserted(() -> 
gfshConnector.executeAndAssertThat(createRegion2.getCommandString())
+        .statusIsSuccess());
+
+    // Add data to the region
+    addData(server1, testRegionName);
+    addData(server2, testRegionName2);
+
+    csb = new CommandStringBuilder(CliStrings.BACKUP_DISK_STORE)
+        .addOption(CliStrings.BACKUP_DISK_STORE__DISKDIRS, "backupDir")
+        .addOption(CliStrings.BACKUP_INCLUDE_DISK_STORES, "diskStore3");
+
+    gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsError()
+        .containsOutput("Specify valid include-disk-stores.");
+
+    csb = new CommandStringBuilder(CliStrings.BACKUP_DISK_STORE)
+        .addOption(CliStrings.BACKUP_DISK_STORE__DISKDIRS, "backupDir")
+        .addOption(CliStrings.BACKUP_INCLUDE_DISK_STORES, 
"diskStore,diskStore4");
+
+    gfshConnector.executeAndAssertThat(csb.getCommandString()).statusIsError()
+        .containsOutput("Specify valid include-disk-stores.");
+  }
+
+  private void addData(MemberVM server1, String testRegionName) {
+    server1.invoke(() -> {
+      Region<Object, Object> region = 
CacheFactory.getAnyInstance().getRegion(testRegionName);
+      for (int i = 0; i < 113; i++) {
+        region.put(i, "A");
+      }
+    });
+  }
+}
diff --git 
a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommand.java
 
b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommand.java
index 3153d78d3a..a4850b0063 100644
--- 
a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommand.java
+++ 
b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/BackupDiskStoreCommand.java
@@ -15,9 +15,11 @@
 
 package org.apache.geode.management.internal.cli.commands;
 
+import java.util.Arrays;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.lang3.StringUtils;
 import org.springframework.shell.core.annotation.CliCommand;
 import org.springframework.shell.core.annotation.CliOption;
 
@@ -27,6 +29,8 @@ import org.apache.geode.distributed.internal.DistributionManager;
 import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.internal.cache.backup.BackupOperation;
 import org.apache.geode.management.BackupStatus;
+import org.apache.geode.management.DistributedSystemMXBean;
+import org.apache.geode.management.ManagementService;
 import org.apache.geode.management.cli.CliMetaData;
 import org.apache.geode.management.cli.GfshCommand;
 import org.apache.geode.management.internal.cli.result.model.ResultModel;
@@ -51,7 +55,9 @@ public class BackupDiskStoreCommand extends GfshCommand {
       @CliOption(key = CliStrings.BACKUP_DISK_STORE__DISKDIRS,
           help = CliStrings.BACKUP_DISK_STORE__DISKDIRS__HELP, mandatory = true) String targetDir,
       @CliOption(key = CliStrings.BACKUP_DISK_STORE__BASELINEDIR,
-          help = CliStrings.BACKUP_DISK_STORE__BASELINEDIR__HELP) String baselineDir) {
+          help = CliStrings.BACKUP_DISK_STORE__BASELINEDIR__HELP) String baselineDir,
+      @CliOption(key = CliStrings.BACKUP_INCLUDE_DISK_STORES,
+          help = CliStrings.BACKUP_INCLUDE_DISK_STORES__HELP) String[] includeDiskStores) {
 
     authorize(ResourcePermission.Resource.CLUSTER, ResourcePermission.Operation.WRITE,
         ResourcePermission.Target.DISK);
@@ -62,11 +68,30 @@ public class BackupDiskStoreCommand extends GfshCommand {
       DistributionManager dm = cache.getDistributionManager();
       BackupStatus backupStatus;
 
+      String includeDiskStoresString = null;
+      if (includeDiskStores != null && includeDiskStores.length > 0) {
+        for (String name : includeDiskStores) {
+          if (name != null && !name.isEmpty()) {
+            if (!diskStoreExists(name)) {
+              return ResultModel.createError(CliStrings.format(
+                  CliStrings.BACKUP_DISK_STORE__MSG__SPECIFY_VALID_INCLUDE_DISKSTORE_UNKNOWN_DISKSTORE_0,
+                  new Object[] {name}));
+            }
+          } else {
+            return ResultModel.createError(CliStrings.format(
+                CliStrings.BACKUP_DISK_STORE__MSG__SPECIFY_VALID_INCLUDE_DISKSTORE_UNKNOWN_DISKSTORE_1));
+          }
+        }
+        includeDiskStoresString = StringUtils.join(includeDiskStores, ",");
+      }
+
       if (baselineDir != null && !baselineDir.isEmpty()) {
         backupStatus =
-            new BackupOperation(dm, dm.getCache()).backupAllMembers(targetDir, baselineDir);
+            new BackupOperation(dm, dm.getCache()).backupAllMembers(targetDir, baselineDir,
+                includeDiskStoresString);
       } else {
-        backupStatus = new BackupOperation(dm, dm.getCache()).backupAllMembers(targetDir, null);
+        backupStatus = new BackupOperation(dm, dm.getCache()).backupAllMembers(targetDir, null,
+            includeDiskStoresString);
       }
 
       Map<DistributedMember, Set<PersistentID>> backedupMemberDiskstoreMap =
@@ -136,4 +161,13 @@ public class BackupDiskStoreCommand extends GfshCommand {
     table.accumulate(CliStrings.BACKUP_DISK_STORE_MSG_DIRECTORY, directory);
     table.accumulate(CliStrings.BACKUP_DISK_STORE_MSG_HOST, host);
   }
+
+  private boolean diskStoreExists(String diskStoreName) {
+    ManagementService managementService = getManagementService();
+    DistributedSystemMXBean dsMXBean = managementService.getDistributedSystemMXBean();
+
+    return Arrays.stream(dsMXBean.listMembers()).anyMatch(
+        member -> DiskStoreCommandsUtils.diskStoreBeanAndMemberBeanDiskStoreExists(dsMXBean, member,
+            diskStoreName));
+  }
 }

Reply via email to