bgaborg commented on a change in pull request #1208: HADOOP-16423. S3Guard 
fsck: Check metadata consistency between S3 and metadatastore (log)
URL: https://github.com/apache/hadoop/pull/1208#discussion_r322787008
 
 

 ##########
 File path: 
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardFsck.java
 ##########
 @@ -0,0 +1,707 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.s3guard;
+
+
+import java.net.URI;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.hadoop.io.IOUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.assertj.core.api.Assertions;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
+import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE;
+import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.awaitFileStatus;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.metadataStorePersistsAuthoritativeBit;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
+import static org.junit.Assume.assumeTrue;
+
+/**
+ * Integration tests for the S3Guard Fsck against a dynamodb backed metadata
+ * store.
+ */
+public class ITestS3GuardFsck extends AbstractS3ATestBase {
+
+  // filesystem with the metadata store enabled (the FS under test)
+  private S3AFileSystem guardedFs;
+  // second FS on the same bucket URI with S3Guard stripped out,
+  // used to manipulate/inspect S3 directly (see createUnguardedFS())
+  private S3AFileSystem rawFS;
+
+  // the metadata store backing guardedFs, captured in setup()
+  private MetadataStore metadataStore;
+
+  @Before
+  public void setup() throws Exception {
+    super.setup();
+    // Every test here needs a metadata store that persists the
+    // authoritative bit; bail out early if the FS lacks one.
+    final S3AFileSystem guarded = getFileSystem();
+    assertTrue("FS needs to have a metadatastore.",
+        guarded.hasMetadataStore());
+    assertTrue("Metadatastore should persist authoritative bit",
+        metadataStorePersistsAuthoritativeBit(guarded.getMetadataStore()));
+
+    metadataStore = guarded.getMetadataStore();
+    guardedFs = guarded;
+
+    // a second FS on the same bucket, guaranteed to have no S3Guard
+    rawFS = createUnguardedFS();
+    assertFalse("Raw FS still has S3Guard " + rawFS,
+        rawFS.hasMetadataStore());
+  }
+
+  @Override
+  public void teardown() throws Exception {
+    // IOUtils.cleanupWithLogger is varargs and null-safe, so both
+    // filesystems can be closed in one call with no explicit null check.
+    IOUtils.cleanupWithLogger(LOG, guardedFs, rawFS);
+    super.teardown();
+  }
+
+  /**
+   * Create a test filesystem which is always unguarded.
+   * This filesystem MUST be closed in test teardown.
+   * @return the new FS
+   */
+  private S3AFileSystem createUnguardedFS() throws Exception {
+    S3AFileSystem testFS = getFileSystem();
+    Configuration config = new Configuration(testFS.getConf());
+    URI uri = testFS.getUri();
+
+    // strip out every S3Guard setting so the new FS comes up raw;
+    // removeBaseAndBucketOverrides takes varargs, so a single call
+    // clears both options.
+    removeBaseAndBucketOverrides(uri.getHost(), config,
+        S3_METADATA_STORE_IMPL,
+        METADATASTORE_AUTHORITATIVE);
+    S3AFileSystem fs2 = new S3AFileSystem();
+    fs2.initialize(uri, config);
+    return fs2;
+  }
+
+  @Test
+  public void testIDetectNoMetadataEntry() throws Exception {
+    final Path cwd = path("/" + getMethodName() + "-" + UUID.randomUUID());
+    final Path file = new Path(cwd, "file");
+    try {
+      // create the file through the raw FS, so the metadata store
+      // never learns about it
+      touch(rawFS, file);
+      awaitFileStatus(rawFS, file);
+
+      final S3GuardFsck s3GuardFsck =
+          new S3GuardFsck(rawFS, metadataStore);
+
+      final List<S3GuardFsck.ComparePair> comparePairs =
+          s3GuardFsck.compareS3ToMs(cwd);
+
+      // one pair for cwd, one for the file under it
+      assertEquals("Number of pairs should be two.", 2,
+          comparePairs.size());
+      final S3GuardFsck.ComparePair pair = comparePairs.get(0);
+      assertTrue("The pair must contain a violation.",
+          pair.containsViolation());
+      assertEquals("The pair must contain only one violation", 1,
+          pair.getViolations().size());
+
+      final S3GuardFsck.Violation violation =
+          pair.getViolations().iterator().next();
+      // assertEquals signature is (message, expected, actual):
+      // the expected enum constant goes first
+      assertEquals("The violation should be that there is no metadata entry.",
+          S3GuardFsck.Violation.NO_METADATA_ENTRY, violation);
+    } finally {
+      // delete the working directory with all of its contents
+      rawFS.delete(cwd, true);
+      metadataStore.forgetMetadata(file);
+      metadataStore.forgetMetadata(cwd);
+    }
+  }
+
+  @Test
+  public void testIDetectNoParentEntry() throws Exception {
+    final Path cwd = path("/" + getMethodName() + "-" + UUID.randomUUID());
+    final Path file = new Path(cwd, "file");
+    try {
+      // create a file with guarded fs
+      touch(guardedFs, file);
+      awaitFileStatus(guardedFs, file);
+
+      // delete the parent from the MS
+      metadataStore.forgetMetadata(cwd);
+
+      final S3GuardFsck s3GuardFsck =
+          new S3GuardFsck(rawFS, metadataStore);
+
+      final List<S3GuardFsck.ComparePair> comparePairs =
+          s3GuardFsck.compareS3ToMs(cwd);
+
+      assertEquals("Number of pairs should be two. The cwd (parent) and the "
+              + "child.", 2, comparePairs.size());
+
+      // check the parent that it does not exist.
+      // Use orElse(null) instead of get(): a bare get() on an empty
+      // Optional throws NoSuchElementException, which would make the
+      // assertNotNull below unreachable dead code.
+      final S3GuardFsck.ComparePair cwdPair = comparePairs.stream()
+          .filter(p -> p.getPath().equals(cwd))
+          .findFirst().orElse(null);
+      assertNotNull("The pair should not be null.", cwdPair);
+      assertTrue("The cwdPair must contain a violation.",
+          cwdPair.containsViolation());
+      Assertions.assertThat(cwdPair.getViolations())
+          .describedAs("Violations in the cwdPair")
+          .contains(S3GuardFsck.Violation.NO_METADATA_ENTRY);
+
+      // check the child that there's no parent entry.
+      final S3GuardFsck.ComparePair childPair = comparePairs.stream()
+          .filter(p -> p.getPath().equals(file))
+          .findFirst().orElse(null);
+      assertNotNull("The pair should not be null.", childPair);
+      assertTrue("The childPair must contain a violation.",
+          childPair.containsViolation());
+      Assertions.assertThat(childPair.getViolations())
+          .describedAs("Violations in the childPair")
+          .contains(S3GuardFsck.Violation.NO_PARENT_ENTRY);
+    } finally {
+      // delete the working directory with all of its contents
+      rawFS.delete(cwd, true);
+      metadataStore.forgetMetadata(file);
+      metadataStore.forgetMetadata(cwd);
+    }
+  }
+
+  @Test
+  public void testIDetectParentIsAFile() throws Exception {
+    final Path cwd = path("/" + getMethodName() + "-" + UUID.randomUUID());
+    final Path file = new Path(cwd, "file");
+    try {
+      // create a file with guarded fs
+      touch(guardedFs, file);
 
 Review comment:
   aka raw

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-issues-h...@hadoop.apache.org

Reply via email to