This is an automated email from the ASF dual-hosted git repository.

jmanno pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 5e0bbfc  Wrap undo in try block (#1759)
5e0bbfc is described below

commit 5e0bbfcf7fe74a41cc3283d3f255162a5900c671
Author: Jeffrey Manno <jeffreymann...@gmail.com>
AuthorDate: Wed Nov 4 13:25:33 2020 -0500

    Wrap undo in try block (#1759)
    
    * wrap undo methods in a try block and add exception handling
---
 .../accumulo/master/tableOps/create/ChooseDir.java    | 18 +++++++++++++++---
 .../accumulo/master/tableOps/create/CreateTable.java  | 19 ++++++++++++++-----
 .../master/tableOps/create/FinishCreateTable.java     | 14 +++++++++++---
 3 files changed, 40 insertions(+), 11 deletions(-)

diff --git a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
index 4e16f354..e085349 100644
--- a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
+++ b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/ChooseDir.java
@@ -35,11 +35,14 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 class ChooseDir extends MasterRepo {
   private static final long serialVersionUID = 1L;
 
   private final TableInfo tableInfo;
+  private static final Logger log = LoggerFactory.getLogger(ChooseDir.class);
 
   ChooseDir(TableInfo ti) {
     this.tableInfo = ti;
@@ -60,9 +63,18 @@ class ChooseDir extends MasterRepo {
 
   @Override
   public void undo(long tid, Master master) throws Exception {
-    Path p = tableInfo.getSplitDirsPath();
-    FileSystem fs = p.getFileSystem(master.getContext().getHadoopConf());
-    fs.delete(p, true);
+    // Clean up split files if ChooseDir operation fails
+    Path p = null;
+    try {
+      if (tableInfo.getInitialSplitSize() > 0) {
+        p = tableInfo.getSplitDirsPath();
+        FileSystem fs = p.getFileSystem(master.getContext().getHadoopConf());
+        fs.delete(p, true);
+      }
+    } catch (IOException e) {
+      log.error("Failed to undo ChooseDir operation and failed to clean up split files at {}", p,
+          e);
+    }
   }
 
   /**
diff --git a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/CreateTable.java b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/CreateTable.java
index 061d05f..ca56428 100644
--- a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/CreateTable.java
+++ b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/CreateTable.java
@@ -33,9 +33,12 @@ import org.apache.accumulo.master.tableOps.TableInfo;
 import org.apache.accumulo.master.tableOps.Utils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class CreateTable extends MasterRepo {
   private static final long serialVersionUID = 1L;
+  private static final Logger log = LoggerFactory.getLogger(CreateTable.class);
 
   private TableInfo tableInfo;
 
@@ -82,12 +85,18 @@ public class CreateTable extends MasterRepo {
   @Override
   public void undo(long tid, Master env) throws IOException {
     // Clean up split files if create table operation fails
-    if (tableInfo.getInitialSplitSize() > 0) {
-      Path p = tableInfo.getSplitPath().getParent();
-      FileSystem fs = p.getFileSystem(env.getContext().getHadoopConf());
-      fs.delete(p, true);
+    Path p = null;
+    try {
+      if (tableInfo.getInitialSplitSize() > 0) {
+        p = tableInfo.getSplitPath().getParent();
+        FileSystem fs = p.getFileSystem(env.getContext().getHadoopConf());
+        fs.delete(p, true);
+      }
+    } catch (IOException e) {
+      log.error("Table failed to be created and failed to clean up split files at {}", p, e);
+    } finally {
+      Utils.unreserveNamespace(env, tableInfo.getNamespaceId(), tid, false);
     }
-    Utils.unreserveNamespace(env, tableInfo.getNamespaceId(), tid, false);
   }
 
 }
diff --git a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/FinishCreateTable.java b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/FinishCreateTable.java
index aabb739..c8b515e 100644
--- a/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/FinishCreateTable.java
+++ b/server/manager/src/main/java/org/apache/accumulo/master/tableOps/create/FinishCreateTable.java
@@ -29,10 +29,13 @@ import org.apache.accumulo.master.tableOps.TableInfo;
 import org.apache.accumulo.master.tableOps.Utils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 class FinishCreateTable extends MasterRepo {
 
   private static final long serialVersionUID = 1L;
+  private static final Logger log = LoggerFactory.getLogger(FinishCreateTable.class);
 
   private final TableInfo tableInfo;
 
@@ -70,9 +73,14 @@ class FinishCreateTable extends MasterRepo {
   private void cleanupSplitFiles(Master env) throws IOException {
     // it is sufficient to delete from the parent, because both files are in the same directory, and
     // we want to delete the directory also
-    Path p = tableInfo.getSplitPath().getParent();
-    FileSystem fs = p.getFileSystem(env.getContext().getHadoopConf());
-    fs.delete(p, true);
+    Path p = null;
+    try {
+      p = tableInfo.getSplitPath().getParent();
+      FileSystem fs = p.getFileSystem(env.getContext().getHadoopConf());
+      fs.delete(p, true);
+    } catch (IOException e) {
+      log.error("Table was created, but failed to clean up temporary splits files at {}", p, e);
+    }
   }
 
   @Override

Reply via email to