anmolnar commented on a change in pull request #2931:
URL: https://github.com/apache/hbase/pull/2931#discussion_r577614628



##########
File path: 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPersistedStoreEngine.java
##########
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.hamcrest.core.Is.isA;
+import static org.junit.Assert.assertEquals;
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.TestName;
+import org.mockito.Mockito;
+
+@Category({ RegionServerTests.class, MediumTests.class })
+public class TestPersistedStoreEngine {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestPersistedStoreEngine.class);
+
+  @Rule
+  public TestName name = new TestName();
+
+  @Rule
+  public ExpectedException expectedException = ExpectedException.none();
+
+  private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+  private static final byte[] DEFAULT_STORE_BYTE = TEST_UTIL.fam1;
+
+  private TableName tableName;
+  private Configuration conf;
+  private HStore store;
+
+  @BeforeClass
+  public static void setUpCluster() throws Exception {
+    
TEST_UTIL.getConfiguration().setBoolean(HConstants.STOREFILE_TRACKING_PERSIST_ENABLED,
 true);
+    TEST_UTIL.getConfiguration().set(StoreEngine.STORE_ENGINE_CLASS_KEY,
+      PersistedStoreEngine.class.getName());
+    TEST_UTIL.startMiniCluster();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    store = Mockito.mock(HStore.class);
+    StoreContext context = new StoreContext.Builder().build();
+
+    conf = TEST_UTIL.getConfiguration();
+    Mockito.when(store.getStoreContext()).thenReturn(context);
+    
Mockito.when(store.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    for (TableDescriptor htd: TEST_UTIL.getAdmin().listTableDescriptors()) {
+      TEST_UTIL.deleteTable(htd.getTableName());
+    }
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    // use killMiniHBaseCluster before shutdownMiniCluster because 
shutdownMiniCluster puts a RS
+    // that host hbase:meta to a stopped state that prevents meta lookup the 
table state of
+    // hbase:storefile and hangs the last write to hbase:storefile when any 
tracking region closes.
+    //
+    // this should be rarely happened on a real cluster and RS with meta table 
should be the last
+    // to be shutdown normally.
+    TEST_UTIL.killMiniHBaseCluster();

Review comment:
       I think I understand the reasoning. Is that not the case in the other 
mini-cluster test `TestPersistedStoreFileManager`, where you just shut down the 
cluster without killing it?

##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFilePathAccessor.java
##########
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Helper class to interact with the hbase storefile tracking data persisted 
as off-memory data
+ * from the {@link StoreFileManager}
+ *
+ * There is only a set of tracking storefiles, 'included'.
+ *
+ * e.g. list of storefile paths in 'included' should be the identical copy of 
the in-memory
+ * {@link HStoreFile}'s Path(s) and can be reused during region opens and 
region reassignment.
+ */
+@InterfaceAudience.Private
+public interface StoreFilePathAccessor {
+
+  /**
+   * Create the storefile tracking with the help of using the masterService
+   * @param masterServices instance of HMaster
+   * @throws IOException if Master is not running or connection has been lost
+   */
+  void initialize(final MasterServices masterServices) throws IOException;

Review comment:
       Is this method only used in tests?

##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileTrackingUtils.java
##########
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class to support persistent storefile tracking
+ */
+@InterfaceAudience.Private
+public final class StorefileTrackingUtils {
+
+  private static Logger LOG = 
LoggerFactory.getLogger(StorefileTrackingUtils.class);
+  public static final long SLEEP_DELTA_MS = 
TimeUnit.MILLISECONDS.toMillis(100);
+
+  private StorefileTrackingUtils() {
+    // private for utility class
+  }
+
+  public static boolean isStorefileTrackingPersistEnabled(Configuration conf) {
+    boolean isStoreTrackingPersistEnabled =
+      conf.getBoolean(HConstants.STOREFILE_TRACKING_PERSIST_ENABLED,
+        HConstants.DEFAULT_STOREFILE_TRACKING_PERSIST_ENABLED);
+    boolean isPersistedStoreEngineSet =
+      conf.get(StoreEngine.STORE_ENGINE_CLASS_KEY, 
DefaultStoreEngine.class.getName())
+        .equals(PersistedStoreEngine.class.getName());
+    boolean isFeatureEnabled = isStoreTrackingPersistEnabled && 
isPersistedStoreEngineSet;
+    if (isStoreTrackingPersistEnabled ^ isPersistedStoreEngineSet) {
+      // check if both configuration are correct.
+      String errorMessage = String.format("please set %s to true and set store 
engine key %s to %s "
+          + "to enable persist storefile tracking",
+        HConstants.STOREFILE_TRACKING_PERSIST_ENABLED,
+        StoreEngine.STORE_ENGINE_CLASS_KEY, 
PersistedStoreEngine.class.getName());
+      throw new IllegalArgumentException(errorMessage);
+    }
+    return isFeatureEnabled;
+  }
+
+  /**
+   * if storefile tracking feature is configured, Initialize hbase:storefile 
table and wait for it
+   * to be online. Otherwise, look for hbase:storefile table and remove it
+   *
+   * @param masterServices masterServices
+   * @throws IOException if hbase:storefile table cannot be initialized and be 
online
+   */
+  public static void init(MasterServices masterServices) throws IOException {
+    createStorefileTable(masterServices);
+    waitForStoreFileTableOnline(masterServices);
+  }
+
+  /**
+   * Cleans up all storefile related state on the cluster. disable and delete 
hbase:storefile
+   * if found
+   * @param masterServices {@link MasterServices}
+   * @throws IOException if failures
+   */
+  private static void cleanup(MasterServices masterServices) throws 
IOException {

Review comment:
       According to IntelliJ, this method is not used.

##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFilePathUpdate.java
##########
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.List;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.fs.Path;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import 
org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
+
+@InterfaceAudience.Private
+final class StoreFilePathUpdate {
+
+  private final List<Path> storeFiles;
+  private final boolean hasStoreFilesUpdate;
+
+  private StoreFilePathUpdate(final List<Path> storeFiles, boolean 
hasStoreFilesUpdate) {
+    Preconditions.checkArgument(hasStoreFilesUpdate,
+      "StoreFilePathUpdate must include an update");
+    Preconditions.checkNotNull(storeFiles, "StoreFiles cannot be null");
+    if (hasStoreFilesUpdate) {
+      Preconditions
+        .checkArgument(CollectionUtils.isNotEmpty(storeFiles), "StoreFilePaths 
cannot be empty");
+    }
+    this.storeFiles = storeFiles;
+    this.hasStoreFilesUpdate = hasStoreFilesUpdate;
+  }
+
+  List<Path> getStoreFiles() {
+    return storeFiles;
+  }
+
+  boolean hasStoreFilesUpdate() {
+    return hasStoreFilesUpdate;
+  }
+
+  @Override
+  public String toString() {
+    return "StoreFilePathUpdate{" + "storeFiles=" + storeFiles + ", 
hasStoreFilesUpdate="
+      + hasStoreFilesUpdate + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    StoreFilePathUpdate that = (StoreFilePathUpdate) o;
+
+    return new EqualsBuilder().append(hasStoreFilesUpdate, 
that.hasStoreFilesUpdate)
+      .append(storeFiles, that.storeFiles).isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 37).append(storeFiles)
+      .append(hasStoreFilesUpdate).toHashCode();
+  }
+
+  static Builder builder() {
+    return new Builder();
+  }
+
+  static class Builder {
+    private List<Path> storeFiles = ImmutableList.of();
+    private boolean hasStoreFilesUpdate;
+
+    Builder withStoreFiles(List<HStoreFile> storeFiles) {
+      Preconditions.checkArgument(!hasStoreFilesUpdate,
+        "Specify a Path List or File List, but not both");
+      this.storeFiles = 
StorefileTrackingUtils.convertStoreFilesToPaths(storeFiles);
+      this.hasStoreFilesUpdate = true;
+      return this;
+    }
+
+    Builder withStorePaths(List<Path> storeFiles) {

Review comment:
       Testing only?

##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFilePathAccessor.java
##########
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Helper class to interact with the hbase storefile tracking data persisted 
as off-memory data
+ * from the {@link StoreFileManager}
+ *
+ * There is only a set of tracking storefiles, 'included'.
+ *
+ * e.g. list of storefile paths in 'included' should be the identical copy of 
the in-memory
+ * {@link HStoreFile}'s Path(s) and can be reused during region opens and 
region reassignment.
+ */
+@InterfaceAudience.Private
+public interface StoreFilePathAccessor {
+
+  /**
+   * Create the storefile tracking with the help of using the masterService
+   * @param masterServices instance of HMaster
+   * @throws IOException if Master is not running or connection has been lost
+   */
+  void initialize(final MasterServices masterServices) throws IOException;
+
+  /**
+   * GET storefile paths from the 'included' data set
+   * @param tableName name of the current table in String
+   * @param regionName name of the current region in String
+   * @param storeName name of the column family in String, to be combined with 
regionName to make
+   *                 the row key.
+   * @return list of StoreFile paths that should be included in reads in this 
store,
+   *         returns an empty list if the target cell is empty or doesn't 
exist.
+   * @throws IOException if a remote or network exception occurs during Get
+   */
+  List<Path> getIncludedStoreFilePaths(final String tableName, final String 
regionName,
+    final String storeName) throws IOException;
+
+  /**
+   * Writes the specified updates to the tracking

Review comment:
       In my understanding, `StoreFilePathUpdate` represents a single entity 
which should be persisted by the Accessor. This could happen in some cases, such 
as the results of a compaction, a flush, a recovered HFile, etc.
   Essentially, it is a list of Paths decorated with the `hasStoreFilesUpdate()` 
property, whose purpose I don't fully understand.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Reply via email to