This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 055f43c04a0 HIVE-28005: Remove upgrade-acid module (#5017). (Butao Zhang, reviewed by Ayush Saxena, Denys Kuzmenko)
055f43c04a0 is described below

commit 055f43c04a072e80ed6236fccb7023f4e0220f1a
Author: Butao Zhang <[email protected]>
AuthorDate: Wed Jan 24 16:50:45 2024 +0800

    HIVE-28005: Remove upgrade-acid module (#5017). (Butao Zhang, reviewed by Ayush Saxena, Denys Kuzmenko)
---
 packaging/src/main/assembly/src.xml                |   1 -
 pom.xml                                            |   1 -
 upgrade-acid/pom.xml                               |  68 --
 upgrade-acid/pre-upgrade/pom.xml                   | 360 ----------
 .../hive/upgrade/acid/CloseableThreadLocal.java    |  61 --
 .../hive/upgrade/acid/CompactTablesState.java      |  59 --
 .../hive/upgrade/acid/CompactionMetaInfo.java      |  68 --
 .../acid/NamedForkJoinWorkerThreadFactory.java     |  40 --
 .../hadoop/hive/upgrade/acid/PreUpgradeTool.java   | 750 ---------------------
 .../hadoop/hive/upgrade/acid/RunOptions.java       | 110 ---
 .../upgrade/acid/TestCloseableThreadLocal.java     |  86 ---
 .../hive/upgrade/acid/TestPreUpgradeTool.java      | 495 --------------
 .../hadoop/hive/upgrade/acid/TestRunOptions.java   |  67 --
 13 files changed, 2166 deletions(-)

diff --git a/packaging/src/main/assembly/src.xml b/packaging/src/main/assembly/src.xml
index 6cd94585cbf..4d4551798ac 100644
--- a/packaging/src/main/assembly/src.xml
+++ b/packaging/src/main/assembly/src.xml
@@ -110,7 +110,6 @@
         <include>standalone-metastore/pom.xml</include>
         <include>streaming/**/*</include>
         <include>testutils/**/*</include>
-        <include>upgrade-acid/**/*</include>
         <include>vector-code-gen/**/*</include>
         <include>kudu-handler/**/*</include>
         <include>parser/**/*</include>
diff --git a/pom.xml b/pom.xml
index b6959e10cee..1317235e40c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -58,7 +58,6 @@
     <module>testutils</module>
     <module>packaging</module>
     <module>standalone-metastore</module>
-    <module>upgrade-acid</module>
     <module>kafka-handler</module>
   </modules>
   <properties>
diff --git a/upgrade-acid/pom.xml b/upgrade-acid/pom.xml
deleted file mode 100644
index d3fb9b28545..00000000000
--- a/upgrade-acid/pom.xml
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <!--disconnected from hive root pom since this module needs 2.x jars-->
-    <groupId>org.apache</groupId>
-    <artifactId>apache</artifactId>
-    <version>23</version>
-  </parent>
-  <!--this module is added to parent pom so that it builds and releases with the rest of Hive-->
-  <groupId>org.apache.hive</groupId>
-  <artifactId>hive-upgrade-acid</artifactId>
-  <version>4.0.0-beta-2-SNAPSHOT</version>
-  <name>Hive Upgrade Acid</name>
-  <packaging>pom</packaging>
-  <properties>
-    <!-- Build properties -->
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <maven.compiler.source>1.8</maven.compiler.source>
-    <maven.compiler.target>1.8</maven.compiler.target>
-    <maven.compiler.useIncrementalCompilation>false</maven.compiler.useIncrementalCompilation>
-    <maven.repo.local>${settings.localRepository}</maven.repo.local>
-    <hive.path.to.root>..</hive.path.to.root>
-    <!-- Plugin versions -->
-    <ant.contrib.version>1.0b3</ant.contrib.version>
-    <checkstyle.conf.dir>${basedir}/${hive.path.to.root}/checkstyle</checkstyle.conf.dir>
-    <maven.checkstyle.plugin.version>2.17</maven.checkstyle.plugin.version>
-    <junit.jupiter.version>5.6.2</junit.jupiter.version>
-    <junit.vintage.version>5.6.2</junit.vintage.version>
-    <maven.versions.plugin.version>2.16.0</maven.versions.plugin.version>
-    <maven.surefire.plugin.version>3.0.0-M4</maven.surefire.plugin.version>
-  </properties>
-  <modules>
-    <module>pre-upgrade</module>
-  </modules>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>${maven.checkstyle.plugin.version}</version>
-        <configuration>
-          <configLocation>${checkstyle.conf.dir}/checkstyle.xml</configLocation>
-          <propertyExpansion>config_loc=${checkstyle.conf.dir}</propertyExpansion>
-          <includeTestSourceDirectory>true</includeTestSourceDirectory>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>versions-maven-plugin</artifactId>
-        <version>${maven.versions.plugin.version}</version>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/upgrade-acid/pre-upgrade/pom.xml b/upgrade-acid/pre-upgrade/pom.xml
deleted file mode 100644
index 989205bd80a..00000000000
--- a/upgrade-acid/pre-upgrade/pom.xml
+++ /dev/null
@@ -1,360 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <groupId>org.apache.hive</groupId>
-    <artifactId>hive-upgrade-acid</artifactId>
-    <version>4.0.0-beta-2-SNAPSHOT</version>
-    <relativePath>../pom.xml</relativePath>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-  <!--this module is added to parent pom so that it builds and releases with the rest of Hive-->
-  <artifactId>hive-pre-upgrade</artifactId>
-  <name>Hive Pre Upgrade Acid</name>
-  <packaging>jar</packaging>
-  <properties>
-    <hive.path.to.root>../..</hive.path.to.root>
-    <!-- Test Properties -->
-    <test.conf.dir>${project.build.directory}/testconf</test.conf.dir>
-    <test.log4j.scheme>file://</test.log4j.scheme>
-    <log4j.conf.dir>${project.basedir}/src/test/resources</log4j.conf.dir>
-    <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
-    <test.warehouse.dir>${project.build.directory}/warehouse</test.warehouse.dir>
-    <test.warehouse.scheme>file://</test.warehouse.scheme>
-    <test.forkcount>1</test.forkcount>
-    <skipITests>true</skipITests>
-    <hdp.hive.version>2.3.3</hdp.hive.version>
-    <hdp.hadoop.version>2.7.2</hdp.hadoop.version>
-  </properties>
-  <dependencies>
-    <!--scope is 'provided' for all.  The UpgradeTool is provided as part of Hive 3.x and
-        supports 2 modes - preUpgrade which runs with 2.x jars on the classpath and postUpgrade
-        which runs with 3.x jars.  'provided' should pull these jars for compile/test but not
-        for packaging.-->
-    <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-      <version>1.2</version>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-metastore</artifactId>
-      <version>${hdp.hive.version}</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-compiler</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>tomcat</groupId>
-          <artifactId>jasper-runtime</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.zookeeper</groupId>
-          <artifactId>zookeeper</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.curator</groupId>
-          <artifactId>curator-framework</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>jdk.tools</groupId>
-          <artifactId>jdk.tools</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-exec</artifactId>
-      <version>${hdp.hive.version}</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.codehaus.groovy</groupId>
-          <artifactId>groovy-all</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.zookeeper</groupId>
-          <artifactId>zookeeper</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.curator</groupId>
-          <artifactId>curator-framework</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.pentaho</groupId>
-          <artifactId>pentaho-aggdesigner-algorithm</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hdp.hadoop.version}</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>commons-beanutils</groupId>
-          <artifactId>commons-beanutils</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.zookeeper</groupId>
-          <artifactId>zookeeper</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.curator</groupId>
-          <artifactId>curator-client</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.curator</groupId>
-          <artifactId>curator-recipes</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.curator</groupId>
-          <artifactId>curator-framework</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <!-- w/o this we get this, even though mapreduce.framework.name=mapred.job.tracker=local
-            https://stackoverflow.com/questions/24096834/org-apache-hadoop-mapred-localclientprotocolprovider-not-found
-
-            2018-05-23T13:01:50,122 ERROR [main] exec.Task: Job Submission failed with exception 'java.io.IOException(Cannot initialize Cluster. Please check yo\
-ur configuration for mapreduce.framework.name and the correspond server addresses.)'
-java.io.IOException: Cannot initialize Cluster. Please check your configuration for mapreduce.framework.name and the correspond server addresses.
-        at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:120)
-        at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82)
-        at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75)
-        at org.apache.hadoop.mapred.JobClient.init(JobClient.java:470)
-        at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:449)
-        at org.apache.hadoop.hive.ql.exec.mr.ExecDriver.execute(ExecDriver.java:369)
-        at org.apache.hadoop.hive.ql.exec.mr.MapRedTask.execute(MapRedTask.java:151)
-        at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:199)
-        at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:100)
-        at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:2183)
-        at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:1839)
-        at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:1526)
-        at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1237)
-
-            -->
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-common</artifactId>
-      <version>2.7.2</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.zookeeper</groupId>
-          <artifactId>zookeeper</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.netty</groupId>
-          <artifactId>netty</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.orc</groupId>
-      <artifactId>orc-core</artifactId>
-      <version>1.3.3</version>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.junit.jupiter</groupId>
-      <artifactId>junit-jupiter-engine</artifactId>
-      <version>${junit.jupiter.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.junit.vintage</groupId>
-      <artifactId>junit-vintage-engine</artifactId>
-      <version>${junit.vintage.version}</version>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <resources>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <includes>
-          <include>package.jdo</include>
-        </includes>
-      </resource>
-    </resources>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-antrun-plugin</artifactId>
-          <dependencies>
-            <dependency>
-              <groupId>ant-contrib</groupId>
-              <artifactId>ant-contrib</artifactId>
-              <version>${ant.contrib.version}</version>
-              <exclusions>
-                <exclusion>
-                  <groupId>ant</groupId>
-                  <artifactId>ant</artifactId>
-                </exclusion>
-              </exclusions>
-            </dependency>
-          </dependencies>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-checkstyle-plugin</artifactId>
-          <version>${maven.checkstyle.plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>exec-maven-plugin</artifactId>
-          <version>${maven.exec.plugin.version}</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-surefire-plugin</artifactId>
-          <version>${maven.surefire.plugin.version}</version>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-    <plugins>
-      <!-- plugins are always listed in sorted order by groupId, artifactId -->
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>setup-test-dirs</id>
-            <phase>process-test-resources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <delete dir="${test.conf.dir}"/>
-                <delete dir="${test.tmp.dir}"/>
-                <delete dir="${test.warehouse.dir}"/>
-                <mkdir dir="${test.tmp.dir}"/>
-                <mkdir dir="${test.warehouse.dir}"/>
-                <mkdir dir="${test.conf.dir}"/>
-                <!-- copies hive-site.xml so it can be modified -->
-                <copy todir="${test.conf.dir}">
-                  <fileset dir="${basedir}/${hive.path.to.root}/data/conf/"/>
-                </copy>
-              </target>
-            </configuration>
-          </execution>
-          <execution>
-            <id>setup-metastore-scripts</id>
-            <phase>process-test-resources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <mkdir dir="${test.tmp.dir}/scripts/metastore"/>
-                <copy todir="${test.tmp.dir}/scripts/metastore">
-                  <fileset dir="${basedir}/${hive.path.to.root}/metastore/scripts/"/>
-                </copy>
-                <mkdir dir="${test.tmp.dir}/scripts/metastore/upgrade"/>
-                <copy todir="${test.tmp.dir}/scripts/metastore/upgrade">
-                  <fileset dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-server/src/main/sql/"/>
-                </copy>
-              </target>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>integration-test</goal>
-              <goal>verify</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <redirectTestOutputToFile>true</redirectTestOutputToFile>
-          <reuseForks>false</reuseForks>
-          <argLine>-Xmx2048m</argLine>
-          <failIfNoTests>false</failIfNoTests>
-          <systemPropertyVariables>
-            <log4j.debug>true</log4j.debug>
-            <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>
-            <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
-            <hive.in.test>true</hive.in.test>
-          </systemPropertyVariables>
-          <additionalClasspathElements>
-            <additionalClasspathElement>${log4j.conf.dir}</additionalClasspathElement>
-          </additionalClasspathElements>
-          <skipITs>${skipITests}</skipITs>
-          <!-- set this to false to run these tests -->
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <redirectTestOutputToFile>true</redirectTestOutputToFile>
-          <reuseForks>false</reuseForks>
-          <forkCount>${test.forkcount}</forkCount>
-          <argLine>-Xmx2048m</argLine>
-          <failIfNoTests>false</failIfNoTests>
-          <systemPropertyVariables>
-            <build.dir>${project.build.directory}</build.dir>
-            <datanucleus.schema.autoCreateAll>true</datanucleus.schema.autoCreateAll>
-            <derby.version>${derby.version}</derby.version>
-            <derby.stream.error.file>${test.tmp.dir}/derby.log</derby.stream.error.file>
-            <!--next line needed to get hive.log-->
-            <log4j.configurationFile>${test.log4j.scheme}${test.conf.dir}/hive-log4j2.properties</log4j.configurationFile>
-            <log4j.debug>true</log4j.debug>
-            <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>
-            <!--
-                        use 'memory' to make it run faster
-                        <javax.jdo.option.ConnectionURL>jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db;create=true</javax.jdo.option.ConnectionURL>-->
-            <javax.jdo.option.ConnectionURL>jdbc:derby:${test.tmp.dir}/junit_metastore_db;create=true</javax.jdo.option.ConnectionURL>
-            <metastore.schema.verification>false</metastore.schema.verification>
-            <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
-            <metastore.warehouse.dir>${test.warehouse.scheme}${test.warehouse.dir}</metastore.warehouse.dir>
-            <!-- both default to 'local'
-                        <mapred.job.tracker>local</mapred.job.tracker>
-                        <mapreduce.framework.name>local</mapreduce.framework.name>-->
-          </systemPropertyVariables>
-          <additionalClasspathElements>
-            <additionalClasspathElement>${log4j.conf.dir}</additionalClasspathElement>
-            <additionalClasspathElement>${test.conf.dir}</additionalClasspathElement>
-            <!--puts hive-site.xml on classpath - w/o HMS tables are not created-->
-            <additionalClasspathElement>${test.conf.dir}/conf</additionalClasspathElement>
-          </additionalClasspathElements>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CloseableThreadLocal.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CloseableThreadLocal.java
deleted file mode 100644
index fbe0a80d488..00000000000
--- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CloseableThreadLocal.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Consumer;
-import java.util.function.Supplier;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class has similar functionality as java.lang.ThreadLocal.
- * Plus it provides a close function to clean up unmanaged resources in all threads where the resource was initialized.
- * @param <T> - type of resource
- */
-public class CloseableThreadLocal<T> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(CloseableThreadLocal.class);
-
-  private final ConcurrentHashMap<Thread, T> threadLocalMap;
-  private final Supplier<T> initialValue;
-  private final Consumer<T> closeFunction;
-
-  public CloseableThreadLocal(Supplier<T> initialValue, Consumer<T> closeFunction, int poolSize) {
-    this.initialValue = initialValue;
-    threadLocalMap = new ConcurrentHashMap<>(poolSize);
-    this.closeFunction = closeFunction;
-  }
-
-  public T get() {
-    return threadLocalMap.computeIfAbsent(Thread.currentThread(), thread -> initialValue.get());
-  }
-
-  public void close() {
-    threadLocalMap.values().forEach(this::closeQuietly);
-  }
-
-  private void closeQuietly(T resource) {
-    try {
-      closeFunction.accept(resource);
-    } catch (Exception e) {
-      LOG.warn("Error while closing resource.", e);
-    }
-  }
-}
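
For orientation, here is a minimal usage sketch of the removed CloseableThreadLocal. The Connection stand-in class, the pool size, and the messages are illustrative only and are not part of this commit.

    import org.apache.hadoop.hive.upgrade.acid.CloseableThreadLocal;

    // Each thread lazily gets its own resource; close() releases all of them at once.
    public class CloseableThreadLocalSketch {
      static class Connection {
        void send(String msg) { System.out.println(Thread.currentThread().getName() + ": " + msg); }
        void shutdown()       { System.out.println("closed resource created on " + Thread.currentThread().getName()); }
      }

      public static void main(String[] args) throws InterruptedException {
        CloseableThreadLocal<Connection> connections =
            new CloseableThreadLocal<>(Connection::new, Connection::shutdown, 2);
        Runnable task = () -> connections.get().send("hello");
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start(); t2.start();
        t1.join(); t2.join();
        // Unlike java.lang.ThreadLocal, a single call cleans up the resource created in every thread.
        connections.close();
      }
    }
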
diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactTablesState.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactTablesState.java
deleted file mode 100644
index beb934c83eb..00000000000
--- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactTablesState.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import static java.util.Collections.emptyList;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Store result of database and table scan: compaction commands and meta info.
- */
-public final class CompactTablesState {
-
-  public static CompactTablesState empty() {
-    return new CompactTablesState(emptyList(), new CompactionMetaInfo());
-  }
-
-  public static CompactTablesState compactions(List<String> compactionCommands, CompactionMetaInfo compactionMetaInfo) {
-    return new CompactTablesState(compactionCommands, compactionMetaInfo);
-  }
-
-  private final List<String> compactionCommands;
-  private final CompactionMetaInfo compactionMetaInfo;
-
-  private CompactTablesState(List<String> compactionCommands, CompactionMetaInfo compactionMetaInfo) {
-    this.compactionCommands = compactionCommands;
-    this.compactionMetaInfo = compactionMetaInfo;
-  }
-
-  public List<String> getCompactionCommands() {
-    return compactionCommands;
-  }
-
-  public CompactionMetaInfo getMetaInfo() {
-    return compactionMetaInfo;
-  }
-
-  public CompactTablesState merge(CompactTablesState other) {
-    List<String> compactionCommands = new ArrayList<>(this.compactionCommands);
-    compactionCommands.addAll(other.compactionCommands);
-    return new CompactTablesState(compactionCommands, this.compactionMetaInfo.merge(other.compactionMetaInfo));
-  }
-}
diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactionMetaInfo.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactionMetaInfo.java
deleted file mode 100644
index 72b4ec63a9e..00000000000
--- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactionMetaInfo.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Store result of compaction calls.
- */
-public class CompactionMetaInfo {
-  /**
-   * total number of bytes to be compacted across all compaction commands.
-   */
-  private long numberOfBytes;
-  /**
-   * IDs of compactions launched by this utility.
-   */
-  private final Set<Long> compactionIds;
-
-  public CompactionMetaInfo() {
-    compactionIds = new HashSet<>();
-    numberOfBytes = 0;
-  }
-
-  private CompactionMetaInfo(Set<Long> initialCompactionIds, long initialNumberOfBytes) {
-    this.compactionIds = new HashSet<>(initialCompactionIds);
-    numberOfBytes = initialNumberOfBytes;
-  }
-
-  public CompactionMetaInfo merge(CompactionMetaInfo other) {
-    CompactionMetaInfo result = new CompactionMetaInfo(this.compactionIds, this.numberOfBytes);
-    result.numberOfBytes += other.numberOfBytes;
-    result.compactionIds.addAll(other.compactionIds);
-    return result;
-  }
-
-  public long getNumberOfBytes() {
-    return numberOfBytes;
-  }
-
-  public void addBytes(long bytes) {
-    numberOfBytes += bytes;
-  }
-
-  public Set<Long> getCompactionIds() {
-    return compactionIds;
-  }
-
-  public void addCompactionId(long compactionId) {
-    compactionIds.add(compactionId);
-  }
-}
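
The two removed value classes above are combined via merge() rather than mutated in place, which is what lets per-database and per-table results be reduced across PreUpgradeTool's parallel stream further down. A small sketch of CompactionMetaInfo aggregation; the IDs and byte counts are made up:

    import org.apache.hadoop.hive.upgrade.acid.CompactionMetaInfo;

    public class CompactionMetaInfoSketch {
      public static void main(String[] args) {
        CompactionMetaInfo fromDb1 = new CompactionMetaInfo();
        fromDb1.addCompactionId(101L);
        fromDb1.addBytes(10L * 1024 * 1024);

        CompactionMetaInfo fromDb2 = new CompactionMetaInfo();
        fromDb2.addCompactionId(102L);
        fromDb2.addBytes(5L * 1024 * 1024);

        // merge() returns a new instance holding the union of IDs and the summed byte count.
        CompactionMetaInfo total = fromDb1.merge(fromDb2);
        System.out.println(total.getCompactionIds());  // [101, 102]
        System.out.println(total.getNumberOfBytes());  // 15728640
      }
    }
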
diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/NamedForkJoinWorkerThreadFactory.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/NamedForkJoinWorkerThreadFactory.java
deleted file mode 100644
index 2b95f7be961..00000000000
--- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/NamedForkJoinWorkerThreadFactory.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.ForkJoinWorkerThread;
-
-/**
- * This class allows specifying a prefix for ForkJoinPool thread names.
- */
-public class NamedForkJoinWorkerThreadFactory implements ForkJoinPool.ForkJoinWorkerThreadFactory {
-
-  NamedForkJoinWorkerThreadFactory(String namePrefix) {
-    this.namePrefix = namePrefix;
-  }
-
-  private final String namePrefix;
-
-  @Override
-  public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
-    ForkJoinWorkerThread worker = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool);
-    worker.setName(namePrefix + worker.getName());
-    return worker;
-  }
-}
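
Since its constructor is package-private, the removed factory is only usable from within org.apache.hadoop.hive.upgrade.acid; this is how PreUpgradeTool (below) names its table-processing threads. A hypothetical standalone sketch under that same-package assumption:

    package org.apache.hadoop.hive.upgrade.acid;

    import java.util.concurrent.ForkJoinPool;

    public class NamedPoolSketch {
      public static void main(String[] args) {
        ForkJoinPool pool = new ForkJoinPool(
            4,                                               // parallelism (illustrative)
            new NamedForkJoinWorkerThreadFactory("Table-"),  // workers named Table-ForkJoinPool-...
            (t, e) -> System.err.println(t.getName() + " failed: " + e),
            false);                                          // asyncMode off
        pool.submit(() -> System.out.println("running on " + Thread.currentThread().getName())).join();
        pool.shutdown();
      }
    }
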
diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java
deleted file mode 100644
index b72b236b467..00000000000
--- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java
+++ /dev/null
@@ -1,750 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import static org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.escapeSQLString;
-
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.nio.ByteBuffer;
-import java.nio.charset.CharacterCodingException;
-import java.nio.charset.Charset;
-import java.nio.charset.CharsetDecoder;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.ForkJoinPool;
-import java.util.stream.Collectors;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.lang3.exception.ExceptionUtils;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.CompactionResponse;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.txn.TxnStore;
-import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.io.orc.OrcFile;
-import org.apache.hadoop.hive.ql.io.orc.Reader;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.shims.HadoopShims;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hive.common.util.HiveVersionInfo;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * This utility is designed to help with upgrading Hive 2.x to Hive 3.0.  On-disk layout for
- * transactional tables has changed in 3.0 and requires pre-processing before upgrade to ensure
- * they are readable by Hive 3.0.  Some transactional tables (identified by this utility) require
- * Major compaction to be run on them before upgrading to 3.0.  Once this compaction starts, no
- * more update/delete/merge statements may be executed on these tables until upgrade is finished.
- *
- * Additionally, a new type of transactional tables was added in 3.0 - insert-only tables.  These
- * tables support ACID semantics and work with any Input/OutputFormat.  Any Managed tables may
- * be made insert-only transactional table. These tables don't support Update/Delete/Merge commands.
- *
- * Note that depending on the number of tables/partitions and amount of data in them compactions
- * may take a significant amount of time and resources.  The script output by this utility includes
- * some heuristics that may help estimate the time required.  If no script is produced, no action
- * is needed.  For compactions to run an instance of standalone Hive Metastore must be running.
- * Please make sure hive.compactor.worker.threads is sufficiently high - this specifies the limit
- * of concurrent compactions that may be run.  Each compaction job is a Map-Reduce job.
- * hive.compactor.job.queue may be used to set a Yarn queue name where all compaction jobs will be
- * submitted.
- *
- * "execute" option may be supplied to have the utility automatically execute the
- * equivalent of the generated commands
- *
- * "location" option may be supplied followed by a path to set the location for the generated
- * scripts.
- *
- * Random:
- * This utility connects to the Metastore via API.  It may be necessary to set
- * -Djavax.security.auth.useSubjectCredsOnly=false in Kerberized environment if errors like
- * "org.ietf.jgss.GSSException: No valid credentials provided (
- *    Mechanism level: Failed to find any Kerberos tgt)"
- * show up after kinit.
- *
- * See also org.apache.hadoop.hive.ql.util.UpgradeTool in Hive 3.x
- */
-public class PreUpgradeTool implements AutoCloseable {
-  private static final Logger LOG = LoggerFactory.getLogger(PreUpgradeTool.class);
-  private static final int PARTITION_BATCH_SIZE = 10000;
-
-  public static void main(String[] args) throws Exception {
-    Options cmdLineOptions = createCommandLineOptions();
-    CommandLineParser parser = new GnuParser();
-    CommandLine line;
-    try {
-      line = parser.parse(cmdLineOptions, args);
-    } catch (ParseException e) {
-      System.err.println("PreUpgradeTool: Parsing failed.  Reason: " + e.getLocalizedMessage());
-      printAndExit(cmdLineOptions);
-      return;
-    }
-    if (line.hasOption("help")) {
-      HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp("upgrade-acid", cmdLineOptions);
-      return;
-    }
-    RunOptions runOptions = RunOptions.fromCommandLine(line);
-    LOG.info("Starting with " + runOptions.toString());
-
-    try {
-      String hiveVer = HiveVersionInfo.getShortVersion();
-      LOG.info("Using Hive Version: " + HiveVersionInfo.getVersion() + " 
build: " +
-              HiveVersionInfo.getBuildVersion());
-      if(!hiveVer.startsWith("2.")) {
-        throw new IllegalStateException("preUpgrade requires Hive 2.x.  Actual: " + hiveVer);
-      }
-      try (PreUpgradeTool tool = new PreUpgradeTool(runOptions)) {
-        tool.prepareAcidUpgradeInternal();
-      }
-    } catch(Exception ex) {
-      LOG.error("PreUpgradeTool failed", ex);
-      throw ex;
-    }
-  }
-
-  private final HiveConf conf;
-  private final CloseableThreadLocal<IMetaStoreClient> metaStoreClient;
-  private final ThreadLocal<ValidTxnList> txns;
-  private final RunOptions runOptions;
-
-  public PreUpgradeTool(RunOptions runOptions) {
-    this.runOptions = runOptions;
-    this.conf = hiveConf != null ? hiveConf : new HiveConf();
-    this.metaStoreClient = new CloseableThreadLocal<>(this::getHMS, IMetaStoreClient::close,
-            runOptions.getTablePoolSize());
-    this.txns = ThreadLocal.withInitial(() -> {
-      /*
-       This API changed from 2.x to 3.0.  so this won't even compile with 3.0
-       but it doesn't need to since we only run this preUpgrade
-      */
-      try {
-        TxnStore txnHandler = TxnUtils.getTxnStore(conf);
-        return TxnUtils.createValidCompactTxnList(txnHandler.getOpenTxnsInfo());
-      } catch (MetaException e) {
-        throw new RuntimeException(e);
-      }
-    });
-  }
-
-  private static void printAndExit(Options cmdLineOptions) {
-    HelpFormatter formatter = new HelpFormatter();
-    formatter.printHelp("upgrade-acid", cmdLineOptions);
-    System.exit(1);
-  }
-
-  static Options createCommandLineOptions() {
-    try {
-      Options cmdLineOptions = new Options();
-      cmdLineOptions.addOption(new Option("help", "Generates a script to execute on 2.x" +
-          " cluster.  This requires 2.x binaries on the classpath and hive-site.xml."));
-      Option exec = new Option("execute",
-          "Executes commands equivalent to generated scripts");
-      exec.setOptionalArg(true);
-      cmdLineOptions.addOption(exec);
-      Option locationOption = new Option("location", true,
-              "Location to write scripts to. Default is CWD.");
-      locationOption.setArgName("path of directory");
-      cmdLineOptions.addOption(locationOption);
-
-      Option dbRegexOption = new Option("d",
-              "Regular expression to match database names on which this tool 
will be run. Default: all databases");
-      dbRegexOption.setLongOpt("dbRegex");
-      dbRegexOption.setArgs(1);
-      dbRegexOption.setArgName("regex");
-      cmdLineOptions.addOption(dbRegexOption);
-
-      Option tableRegexOption = new Option("t",
-              "Regular expression to match table names on which this tool will 
be run. Default: all tables");
-      tableRegexOption.setLongOpt("tableRegex");
-      tableRegexOption.setArgs(1);
-      tableRegexOption.setArgName("regex");
-      cmdLineOptions.addOption(tableRegexOption);
-
-      Option tableTypeOption = new Option("tt",
-              String.format("Table type to match tables on which this tool 
will be run. Possible values: %s " +
-                      "Default: all tables",
-                      
Arrays.stream(TableType.values()).map(Enum::name).collect(Collectors.joining("|"))));
-      tableTypeOption.setLongOpt("tableType");
-      tableTypeOption.setArgs(1);
-      tableTypeOption.setArgName("table type");
-      cmdLineOptions.addOption(tableTypeOption);
-
-      Option tablePoolSizeOption = new Option("tn", "Number of threads to process tables.");
-      tablePoolSizeOption.setLongOpt("tablePoolSize");
-      tablePoolSizeOption.setArgs(1);
-      tablePoolSizeOption.setArgName("pool size");
-      cmdLineOptions.addOption(tablePoolSizeOption);
-
-      return cmdLineOptions;
-    } catch(Exception ex) {
-      LOG.error("init()", ex);
-      throw ex;
-    }
-  }
-
-  private static HiveMetaHookLoader getHookLoader() {
-    return new HiveMetaHookLoader() {
-      @Override
-      public HiveMetaHook getHook(
-              org.apache.hadoop.hive.metastore.api.Table tbl) {
-        return null;
-      }
-    };
-  }
-
-  public IMetaStoreClient getHMS() {
-    UserGroupInformation loggedInUser = null;
-    try {
-      loggedInUser = UserGroupInformation.getLoginUser();
-    } catch (IOException e) {
-      LOG.warn("Unable to get logged in user via UGI. err: {}", e.getMessage());
-    }
-    boolean secureMode = loggedInUser != null && loggedInUser.hasKerberosCredentials();
-    if (secureMode) {
-      conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
-    }
-    try {
-      LOG.info("Creating metastore client for {}", "PreUpgradeTool");
-      /* I'd rather call return RetryingMetaStoreClient.getProxy(conf, true)
-      which calls HiveMetaStoreClient(HiveConf, Boolean) which exists in
-       (at least) 2.1.0.2.6.5.0-292 and later but not in 2.1.0.2.6.0.3-8 (the HDP 2.6 release)
-       i.e. RetryingMetaStoreClient.getProxy(conf, true) is broken in 2.6.0*/
-      IMetaStoreClient client = RetryingMetaStoreClient.getProxy(conf,
-              new Class[]{HiveConf.class, HiveMetaHookLoader.class, Boolean.class},
-              new Object[]{conf, getHookLoader(), Boolean.TRUE}, null, HiveMetaStoreClient.class.getName());
-      if (hiveConf != null) {
-        SessionState ss = SessionState.start(conf);
-        ss.applyAuthorizationPolicy();
-      }
-      return client;
-    } catch (MetaException | HiveException e) {
-      throw new RuntimeException("Error connecting to Hive Metastore URI: "
-              + conf.getVar(HiveConf.ConfVars.METASTOREURIS) + ". " + e.getMessage(), e);
-    }
-  }
-
-  /*
-   * todo: change script comments to a preamble instead of a footer
-   */
-  private void prepareAcidUpgradeInternal()
-      throws HiveException, TException, IOException {
-    if (!isAcidEnabled(conf)) {
-      LOG.info("acid is off, there can't be any acid tables - nothing to 
compact");
-      return;
-    }
-    IMetaStoreClient hms = metaStoreClient.get();
-    LOG.debug("Looking for databases");
-    String exceptionMsg = null;
-    List<String> databases;
-    CompactTablesState compactTablesState;
-    try {
-      databases = hms.getDatabases(runOptions.getDbRegex()); //TException
-      LOG.debug("Found " + databases.size() + " databases to process");
-
-      ForkJoinPool processTablePool = new ForkJoinPool(
-              runOptions.getTablePoolSize(),
-              new NamedForkJoinWorkerThreadFactory("Table-"),
-              getUncaughtExceptionHandler(),
-              false
-              );
-      compactTablesState = databases.stream()
-                      .map(dbName -> processDatabase(dbName, processTablePool, runOptions))
-                      .reduce(CompactTablesState::merge)
-              .orElse(CompactTablesState.empty());
-
-    } catch (Exception e) {
-      if (isAccessControlException(e)) {
-        exceptionMsg = "Unable to get databases. Pre-upgrade tool requires read-access " +
-          "to databases and tables to determine if a table has to be compacted. " +
-          "Set " + HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname + " config to " +
-          "false to allow read-access to databases and tables and retry the pre-upgrade tool again..";
-      }
-      throw new HiveException(exceptionMsg, e);
-    }
-
-    makeCompactionScript(compactTablesState, runOptions.getOutputDir());
-
-    if(runOptions.isExecute()) {
-      while(compactTablesState.getMetaInfo().getCompactionIds().size() > 0) {
-        LOG.debug("Will wait for " + 
compactTablesState.getMetaInfo().getCompactionIds().size() +
-            " compactions to complete");
-        ShowCompactResponse resp = hms.showCompactions();
-        for(ShowCompactResponseElement e : resp.getCompacts()) {
-          final String state = e.getState();
-          boolean removed;
-          switch (state) {
-          case TxnStore.CLEANING_RESPONSE:
-          case TxnStore.SUCCEEDED_RESPONSE:
-            removed = compactTablesState.getMetaInfo().getCompactionIds().remove(e.getId());
-            if(removed) {
-              LOG.debug("Required compaction succeeded: " + e.toString());
-            }
-            break;
-          case TxnStore.ATTEMPTED_RESPONSE:
-          case TxnStore.FAILED_RESPONSE:
-            removed = compactTablesState.getMetaInfo().getCompactionIds().remove(e.getId());
-            if(removed) {
-              LOG.warn("Required compaction failed: " + e.toString());
-            }
-            break;
-          case TxnStore.INITIATED_RESPONSE:
-            //may flood the log
-            //LOG.debug("Still waiting  on: " + e.toString());
-            break;
-          case TxnStore.WORKING_RESPONSE:
-            LOG.debug("Still working on: " + e.toString());
-            break;
-          default://shouldn't be any others
-            LOG.error("Unexpected state for : " + e.toString());
-          }
-        }
-        if(compactTablesState.getMetaInfo().getCompactionIds().size() > 0) {
-          try {
-            if (callback != null) {
-              callback.onWaitForCompaction();
-            }
-            Thread.sleep(pollIntervalMs);
-          } catch (InterruptedException ex) {
-            //this only responds to ^C
-          }
-        }
-      }
-    }
-  }
-
-  private Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
-    return (t, e) -> LOG.error(String.format("Thread %s exited with error", t.getName()), e);
-  }
-
-  private CompactTablesState processDatabase(
-          String dbName, ForkJoinPool threadPool, RunOptions runOptions) {
-    try {
-      IMetaStoreClient hms = metaStoreClient.get();
-
-      List<String> tables;
-      if (runOptions.getTableType() == null) {
-        tables = hms.getTables(dbName, runOptions.getTableRegex());
-        LOG.debug("found {} tables in {}", tables.size(), dbName);
-      } else {
-        tables = hms.getTables(dbName, runOptions.getTableRegex(), runOptions.getTableType());
-        LOG.debug("found {} {} in {}", tables.size(), runOptions.getTableType().name(), dbName);
-      }
-
-      return threadPool.submit(
-          () -> tables.parallelStream()
-                .map(table -> processTable(dbName, table, runOptions))
-                .reduce(CompactTablesState::merge)).get()
-                .orElse(CompactTablesState.empty());
-    } catch (Exception e) {
-      if (isAccessControlException(e)) {
-        // we may not have access to read all tables from this db
-        throw new RuntimeException("Unable to access " + dbName + ". 
Pre-upgrade tool requires read-access " +
-                "to databases and tables to determine if a table has to be 
compacted. " +
-                "Set " + 
HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname + " config to 
" +
-                "false to allow read-access to databases and tables and retry 
the pre-upgrade tool again..", e);
-      }
-      throw new RuntimeException(e);
-    }
-  }
-
-  private CompactTablesState processTable(
-          String dbName, String tableName, RunOptions runOptions) {
-    try {
-      IMetaStoreClient hms = metaStoreClient.get();
-      final CompactionMetaInfo compactionMetaInfo = new CompactionMetaInfo();
-
-      Table t = hms.getTable(dbName, tableName);
-      LOG.debug("processing table " + Warehouse.getQualifiedName(t));
-      List<String> compactionCommands =
-              getCompactionCommands(t, conf, hms, compactionMetaInfo, runOptions.isExecute(), txns.get());
-      return CompactTablesState.compactions(compactionCommands, compactionMetaInfo);
-      /*todo: handle renaming files somewhere*/
-    } catch (Exception e) {
-      if (isAccessControlException(e)) {
-        // this could be external table with 0 permission for hive user
-        throw new RuntimeException(
-                "Unable to access " + dbName + "." + tableName + ". 
Pre-upgrade tool requires read-access " +
-                "to databases and tables to determine if a table has to be 
compacted. " +
-                "Set " + 
HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname + " config to 
" +
-                "false to allow read-access to databases and tables and retry 
the pre-upgrade tool again..", e);
-      }
-      throw new RuntimeException(e);
-    }
-  }
-
-  private boolean isAccessControlException(final Exception e) {
-    // hadoop security AccessControlException
-    if ((e instanceof MetaException && e.getCause() instanceof AccessControlException) ||
-        ExceptionUtils.getRootCause(e) instanceof AccessControlException) {
-      return true;
-    }
-
-    // java security AccessControlException
-    if ((e instanceof MetaException && e.getCause() instanceof java.security.AccessControlException) ||
-        ExceptionUtils.getRootCause(e) instanceof java.security.AccessControlException) {
-      return true;
-    }
-
-    // metastore in some cases sets the AccessControlException as message instead of wrapping the exception
-    return e instanceof MetaException
-            && e.getMessage().startsWith("java.security.AccessControlException: Permission denied");
-  }
-
-  /**
-   * Generates a set of compaction commands to run on a pre Hive 3 cluster.
-   */
-  private static void makeCompactionScript(CompactTablesState result, String scriptLocation) throws IOException {
-    if (result.getCompactionCommands().isEmpty()) {
-      LOG.info("No compaction is necessary");
-      return;
-    }
-    String fileName = "compacts_" + System.currentTimeMillis() + ".sql";
-    LOG.debug("Writing compaction commands to " + fileName);
-    try(PrintWriter pw = createScript(
-            result.getCompactionCommands(), fileName, scriptLocation)) {
-      //add post script
-      pw.println("-- Generated total of " + 
result.getCompactionCommands().size() + " compaction commands");
-      if(result.getMetaInfo().getNumberOfBytes() < Math.pow(2, 20)) {
-        //to see it working in UTs
-        pw.println("-- The total volume of data to be compacted is " +
-            String.format("%.6fMB", 
result.getMetaInfo().getNumberOfBytes()/Math.pow(2, 20)));
-      } else {
-        pw.println("-- The total volume of data to be compacted is " +
-            String.format("%.3fGB", 
result.getMetaInfo().getNumberOfBytes()/Math.pow(2, 30)));
-      }
-      pw.println();
-      //todo: should be at the top of the file...
-      pw.println(
-          "-- Please note that compaction may be a heavyweight and time 
consuming process.\n" +
-              "-- Submitting all of these commands will enqueue them to a 
scheduling queue from\n" +
-              "-- which they will be picked up by compactor Workers.  The max 
number of\n" +
-              "-- concurrent Workers is controlled by 
hive.compactor.worker.threads configured\n" +
-              "-- for the standalone metastore process.  Compaction itself is 
a Map-Reduce job\n" +
-              "-- which is submitted to the YARN queue identified by 
hive.compactor.job.queue\n" +
-              "-- property if defined or 'default' if not defined.  It's 
advisable to set the\n" +
-              "-- capacity of this queue appropriately");
-    }
-  }
-
-  private static PrintWriter createScript(List<String> commands, String fileName,
-      String scriptLocation) throws IOException {
-    FileWriter fw = new FileWriter(scriptLocation + "/" + fileName);
-    PrintWriter pw = new PrintWriter(fw);
-    for(String cmd : commands) {
-      pw.println(cmd + ";");
-    }
-    return pw;
-  }
-  /**
-   * @return any compaction commands to run for {@code Table t}
-   */
-  private static List<String> getCompactionCommands(Table t, HiveConf conf,
-      IMetaStoreClient hms, CompactionMetaInfo compactionMetaInfo, boolean execute,
-      ValidTxnList txns) throws IOException, TException, HiveException {
-    if(!isFullAcidTable(t)) {
-      return Collections.emptyList();
-    }
-    if(t.getPartitionKeysSize() <= 0) {
-      //not partitioned
-      if(!needsCompaction(new Path(t.getSd().getLocation()), conf, compactionMetaInfo, txns)) {
-        return Collections.emptyList();
-      }
-
-      List<String> cmds = new ArrayList<>();
-      cmds.add(getCompactionCommand(t, null));
-      if(execute) {
-        scheduleCompaction(t, null, hms, compactionMetaInfo);
-      }
-      return cmds;
-    }
-    List<String> partNames = hms.listPartitionNames(t.getDbName(), t.getTableName(), (short)-1);
-    int batchSize = PARTITION_BATCH_SIZE;
-    int numWholeBatches = partNames.size()/batchSize;
-    List<String> compactionCommands = new ArrayList<>();
-    for(int i = 0; i < numWholeBatches; i++) {
-      List<Partition> partitionList = hms.getPartitionsByNames(t.getDbName(), t.getTableName(),
-          partNames.subList(i * batchSize, (i + 1) * batchSize));
-      getCompactionCommands(t, partitionList, hms, execute, compactionCommands,
-          compactionMetaInfo, conf, txns);
-    }
-    if(numWholeBatches * batchSize < partNames.size()) {
-      //last partial batch
-      List<Partition> partitionList = hms.getPartitionsByNames(t.getDbName(), t.getTableName(),
-          partNames.subList(numWholeBatches * batchSize, partNames.size()));
-      getCompactionCommands(t, partitionList, hms, execute, compactionCommands,
-          compactionMetaInfo, conf, txns);
-    }
-    return compactionCommands;
-  }
-  private static void getCompactionCommands(Table t, List<Partition> partitionList, IMetaStoreClient hms,
-      boolean execute, List<String> compactionCommands, CompactionMetaInfo compactionMetaInfo,
-      HiveConf conf, ValidTxnList txns)
-      throws IOException, TException, HiveException {
-    for (Partition p : partitionList) {
-      if (needsCompaction(new Path(p.getSd().getLocation()), conf, compactionMetaInfo, txns)) {
-        compactionCommands.add(getCompactionCommand(t, p));
-        if (execute) {
-          scheduleCompaction(t, p, hms, compactionMetaInfo);
-        }
-      }
-    }
-  }
-  private static void scheduleCompaction(Table t, Partition p, IMetaStoreClient db,
-      CompactionMetaInfo compactionMetaInfo) throws HiveException, MetaException {
-    String partName = p == null ? null :
-        Warehouse.makePartName(t.getPartitionKeys(), p.getValues());
-    try {
-      CompactionResponse resp =
-              //this gives an easy way to get at compaction ID so we can only 
wait for those this
-              //utility started
-              db.compact2(t.getDbName(), t.getTableName(), partName, 
CompactionType.MAJOR, null);
-      if (!resp.isAccepted()) {
-        LOG.info(Warehouse.getQualifiedName(t) + (p == null ? "" : "/" + 
partName) +
-                " is already being compacted with id=" + resp.getId());
-      } else {
-        LOG.info("Scheduled compaction for " + Warehouse.getQualifiedName(t) +
-                (p == null ? "" : "/" + partName) + " with id=" + 
resp.getId());
-      }
-      compactionMetaInfo.addCompactionId(resp.getId());
-    } catch (TException e) {
-      throw new HiveException(e);
-    }
-  }
-
-  /**
-   *
-   * @param location - path to a partition (or table if not partitioned) dir
-   */
-  private static boolean needsCompaction(Path location, HiveConf conf,
-      CompactionMetaInfo compactionMetaInfo, ValidTxnList txns) throws 
IOException {
-    FileSystem fs = location.getFileSystem(conf);
-    FileStatus[] deltas = fs.listStatus(location, new PathFilter() {
-      @Override
-      public boolean accept(Path path) {
-        //checking for delete_delta is only so that this functionality can be 
exercised by 3.0 code,
-        //which cannot produce any deltas with a mix of update/insert events
-        return path.getName().startsWith("delta_") || 
path.getName().startsWith("delete_delta_");
-      }
-    });
-    if(deltas == null || deltas.length == 0) {
-      //base_n cannot contain update/delete.  Original files are all 'insert' 
and we need to compact
-      //only if there are update/delete events.
-      return false;
-    }
-    /*getAcidState() is smart not to return any deltas in current if there is 
a base that covers
-    * them, i.e. if they were compacted but not yet cleaned.  This means 
re-checking if
-    * compaction is needed should be cheap(er)*/
-    AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, txns);
-    deltaLoop: for(AcidUtils.ParsedDelta delta : dir.getCurrentDirectories()) {
-      FileStatus[] buckets = fs.listStatus(delta.getPath(), new PathFilter() {
-        @Override
-        public boolean accept(Path path) {
-          //since this is inside a delta dir created by Hive 2.x or earlier it 
can only contain
-          //bucket_x or bucket_x__flush_length
-          return path.getName().startsWith("bucket_");
-        }
-      });
-      for(FileStatus bucket : buckets) {
-        if(bucket.getPath().getName().endsWith("_flush_length")) {
-          //streaming ingest dir - cannot have update/delete events
-          continue deltaLoop;
-        }
-        if(needsCompaction(bucket, fs)) {
-          //found delete events - this 'location' needs compacting
-          compactionMetaInfo.addBytes(getDataSize(location, conf));
-
-          //if there are un-compacted original files, they will be included in 
compaction, so
-          //count their size for 'cost' estimation later
-          for(HadoopShims.HdfsFileStatusWithId origFile : 
dir.getOriginalFiles()) {
-            FileStatus fileStatus = origFile.getFileStatus();
-            if(fileStatus != null) {
-              compactionMetaInfo.addBytes(fileStatus.getLen());
-            }
-          }
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  /**
-   * @param location - path to a partition (or table if not partitioned) dir
-   */
-  private static long getDataSize(Path location, HiveConf conf) throws 
IOException {
-    FileSystem fs = location.getFileSystem(conf);
-    ContentSummary cs = fs.getContentSummary(location);
-    return cs.getLength();
-  }
-
-
-  private static final Charset UTF_8 = StandardCharsets.UTF_8;
-  private static final ThreadLocal<CharsetDecoder> UTF8_DECODER =
-          ThreadLocal.withInitial(UTF_8::newDecoder);
-  private static final String ACID_STATS = "hive.acid.stats";
-
-  private static boolean needsCompaction(FileStatus bucket, FileSystem fs) 
throws IOException {
-    //create reader, look at footer
-    //no need to check side file since it can only be in a streaming ingest 
delta
-    Reader orcReader = OrcFile.createReader(bucket.getPath(), 
OrcFile.readerOptions(fs.getConf()).filesystem(fs));
-    if (orcReader.hasMetadataValue(ACID_STATS)) {
-      try {
-        ByteBuffer val = orcReader.getMetadataValue(ACID_STATS).duplicate();
-        String acidStats = UTF8_DECODER.get().decode(val).toString();
-        String[] parts = acidStats.split(",");
-        long updates = Long.parseLong(parts[1]);
-        long deletes = Long.parseLong(parts[2]);
-        return deletes > 0 || updates > 0;
-      } catch (CharacterCodingException e) {
-        throw new IllegalArgumentException("Bad string encoding for " + 
ACID_STATS, e);
-      }
-    } else {
-      throw new IllegalStateException("AcidStats missing in " + 
bucket.getPath());
-    }
-  }
-
-  private static String getCompactionCommand(Table t, Partition p) {
-    StringBuilder sb = new StringBuilder("ALTER TABLE 
").append(Warehouse.getQualifiedName(t));
-    if(t.getPartitionKeysSize() > 0) {
-      assert p != null : "must supply partition for partitioned table " +
-          Warehouse.getQualifiedName(t);
-      sb.append(" PARTITION(");
-      for (int i = 0; i < t.getPartitionKeysSize(); i++) {
-        sb.append(t.getPartitionKeys().get(i).getName()).append('=').append(
-            genPartValueString(t.getPartitionKeys().get(i).getType(), 
p.getValues().get(i))).
-            append(",");
-      }
-      //replace trailing ','
-      sb.setCharAt(sb.length() - 1, ')');
-    }
-    return sb.append(" COMPACT 'major'").toString();
-  }
-
-  /**
-   * This is copy-pasted from {@link 
org.apache.hadoop.hive.ql.parse.ColumnStatsSemanticAnalyzer},
-   * which can't be refactored since this is linked against Hive 2.x .
-   */
-  private static String genPartValueString(String partColType, String partVal) 
 {
-    String returnVal;
-    if (partColType.equals(serdeConstants.STRING_TYPE_NAME) ||
-        partColType.contains(serdeConstants.VARCHAR_TYPE_NAME) ||
-        partColType.contains(serdeConstants.CHAR_TYPE_NAME)) {
-      returnVal = "'" + escapeSQLString(partVal) + "'";
-    } else if (partColType.equals(serdeConstants.TINYINT_TYPE_NAME)) {
-      returnVal = partVal + "Y";
-    } else if (partColType.equals(serdeConstants.SMALLINT_TYPE_NAME)) {
-      returnVal = partVal + "S";
-    } else if (partColType.equals(serdeConstants.INT_TYPE_NAME)) {
-      returnVal = partVal;
-    } else if (partColType.equals(serdeConstants.BIGINT_TYPE_NAME)) {
-      returnVal = partVal + "L";
-    } else if (partColType.contains(serdeConstants.DECIMAL_TYPE_NAME)) {
-      returnVal = partVal + "BD";
-    } else if (partColType.equals(serdeConstants.DATE_TYPE_NAME) ||
-        partColType.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) {
-      returnVal = partColType + " '" + escapeSQLString(partVal) + "'";
-    } else {
-      //for other usually not used types, just quote the value
-      returnVal = "'" + escapeSQLString(partVal) + "'";
-    }
-
-    return returnVal;
-  }
-  private static boolean isFullAcidTable(Table t) {
-    if (t.getParametersSize() <= 0) {
-      //cannot be acid
-      return false;
-    }
-    String transactionalValue = t.getParameters()
-        .get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
-    if ("true".equalsIgnoreCase(transactionalValue)) {
-      System.out.println("Found Acid table: " + Warehouse.getQualifiedName(t));
-      return true;
-    }
-    return false;
-  }
-  private static boolean isAcidEnabled(HiveConf hiveConf) {
-    String txnMgr = hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER);
-    boolean concurrency =  
hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
-    String dbTxnMgr = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager";
-    return txnMgr.equals(dbTxnMgr) && concurrency;
-  }
-
-  @Override
-  public void close() {
-    metaStoreClient.close();
-  }
-
-  @VisibleForTesting
-  abstract static class Callback {
-    /**
-     * This is a hack to enable unit testing.  Derby can't handle multiple 
concurrent threads but
-     * somehow Compactor needs to run to test "execute" mode.  This callback 
can be used
-     * to run Worker.  For TESTING ONLY.
-     */
-    void onWaitForCompaction() throws MetaException {}
-  }
-  @VisibleForTesting
-  static Callback callback;
-  @VisibleForTesting
-  static int pollIntervalMs = 1000*30;
-  /**
-   * can set it from tests to test when config needs something other than 
default values.
-   */
-  @VisibleForTesting
-  static HiveConf hiveConf = null;
-}
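
For readers skimming the removed PreUpgradeTool above: its compaction decision boils down to two pure string operations, parsing the comma-separated ACID stats triple read from each bucket file's ORC footer (the code takes fields 1 and 2 as the update and delete counts) and emitting an ALTER TABLE ... COMPACT 'major' statement. Below is a minimal, JDK-only sketch of just that logic, detached from the metastore and ORC reader APIs; the class and method names are illustrative, not part of Hive, and the "inserts,updates,deletes" field order is inferred from the parsing above.

    // Illustrative sketch only; mirrors the string-level logic of the deleted tool.
    public final class CompactionDecisionSketch {

        // Field order "inserts,updates,deletes" is inferred from needsCompaction() above.
        static boolean hasUpdateOrDeleteEvents(String acidStats) {
            String[] parts = acidStats.split(",");
            long updates = Long.parseLong(parts[1]);
            long deletes = Long.parseLong(parts[2]);
            return updates > 0 || deletes > 0;
        }

        // Builds the same command text the tool wrote into its compacts_*.sql script.
        static String compactionCommand(String qualifiedTable, String partitionSpec) {
            StringBuilder sb = new StringBuilder("ALTER TABLE ").append(qualifiedTable);
            if (partitionSpec != null) {
                sb.append(" PARTITION(").append(partitionSpec).append(')');
            }
            return sb.append(" COMPACT 'major'").toString();
        }

        public static void main(String[] args) {
            System.out.println(hasUpdateOrDeleteEvents("3,1,0")); // true: one update event recorded
            System.out.println(compactionCommand("default.tacidpart", "p=10Y"));
        }
    }

Run as-is it prints true and the same command text the tests further down expect for partition p=10 of TAcidPart; the generated script simply appends a trailing semicolon.
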
diff --git 
a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/RunOptions.java
 
b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/RunOptions.java
deleted file mode 100644
index 534b971a771..00000000000
--- 
a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/RunOptions.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.hive.metastore.TableType;
-
-/**
- * This class's instance holds the option values that were passed by the user via 
the command line.
- */
-public class RunOptions {
-
-  public static RunOptions fromCommandLine(CommandLine commandLine) {
-    String tableTypeText = commandLine.getOptionValue("tableType");
-
-    int defaultPoolSize = Runtime.getRuntime().availableProcessors();
-    if (defaultPoolSize < 1)
-      defaultPoolSize = 1;
-
-    int tablePoolSize = getIntOptionValue(commandLine, "tablePoolSize", 
defaultPoolSize);
-    if (tablePoolSize < 1)
-      throw new IllegalArgumentException("Please specify a positive integer 
option value for tablePoolSize");
-
-    return new RunOptions(
-      commandLine.getOptionValue("location", "."),
-      commandLine.hasOption("execute"),
-      commandLine.getOptionValue("dbRegex", ".*"),
-      commandLine.getOptionValue("tableRegex", ".*"),
-      tableTypeText == null ? null : TableType.valueOf(tableTypeText),
-      tablePoolSize);
-  }
-
-  private static int getIntOptionValue(CommandLine commandLine, String 
optionName, int defaultValue) {
-    if (commandLine.hasOption(optionName)) {
-      try {
-        return Integer.parseInt(commandLine.getOptionValue(optionName));
-      } catch (NumberFormatException e) {
-        throw new IllegalArgumentException("Please specify a positive integer 
option value for " + optionName, e);
-      }
-    }
-    return defaultValue;
-  }
-
-  private final String outputDir;
-  private final boolean execute;
-  private final String dbRegex;
-  private final String tableRegex;
-  private final TableType tableType;
-  private final int tablePoolSize;
-
-  private RunOptions(String outputDir, boolean execute, String dbRegex, String 
tableRegex, TableType tableType, int tablePoolSize) {
-    this.outputDir = outputDir;
-    this.execute = execute;
-    this.dbRegex = dbRegex;
-    this.tableRegex = tableRegex;
-    this.tableType = tableType;
-    this.tablePoolSize = tablePoolSize;
-  }
-
-  public String getOutputDir() {
-    return outputDir;
-  }
-
-  public boolean isExecute() {
-    return execute;
-  }
-
-  public String getDbRegex() {
-    return dbRegex;
-  }
-
-  public String getTableRegex() {
-    return tableRegex;
-  }
-
-  public TableType getTableType() {
-    return tableType;
-  }
-
-  public int getTablePoolSize() {
-    return tablePoolSize;
-  }
-
-  @Override
-  public String toString() {
-    return "RunOptions{" +
-            "outputDir='" + outputDir + '\'' +
-            ", execute=" + execute +
-            ", dbRegex='" + dbRegex + '\'' +
-            ", tableRegex='" + tableRegex + '\'' +
-            ", tableType=" + tableType +
-            ", tablePoolSize=" + tablePoolSize +
-            '}';
-  }
-}
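
RunOptions only consumes an already-parsed org.apache.commons.cli.CommandLine; the Options themselves were defined in PreUpgradeTool.createCommandLineOptions(), which is not visible in this hunk. The following self-contained sketch shows the commons-cli pattern fromCommandLine() relies on; the option names match the ones read above, but the descriptions and the Options wiring are assumptions made only for illustration.

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.GnuParser;
    import org.apache.commons.cli.Options;

    // Sketch of the commons-cli pattern RunOptions.fromCommandLine() consumed.
    public final class RunOptionsParsingSketch {
        public static void main(String[] args) throws Exception {
            // Assumed option set; the real definitions lived in PreUpgradeTool.createCommandLineOptions().
            Options options = new Options();
            options.addOption("location", true, "directory for generated compaction scripts");
            options.addOption("execute", false, "schedule compactions instead of only writing scripts");
            options.addOption("dbRegex", true, "databases to include");
            options.addOption("tableRegex", true, "tables to include");
            options.addOption("tablePoolSize", true, "tables processed in parallel");

            String[] demo = {"-location", "/tmp/preupgrade", "-execute", "-tablePoolSize", "4"};
            CommandLine cl = new GnuParser().parse(options, demo);

            boolean execute = cl.hasOption("execute");
            int tablePoolSize = Integer.parseInt(cl.getOptionValue("tablePoolSize",
                    String.valueOf(Runtime.getRuntime().availableProcessors())));
            System.out.println(cl.getOptionValue("location", ".")
                    + " execute=" + execute + " tablePoolSize=" + tablePoolSize);
        }
    }
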
diff --git 
a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestCloseableThreadLocal.java
 
b/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestCloseableThreadLocal.java
deleted file mode 100644
index 2584a3be528..00000000000
--- 
a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestCloseableThreadLocal.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-
-import org.junit.Test;
-
-public class TestCloseableThreadLocal {
-
-  private static class AutoCloseableStub implements AutoCloseable {
-
-    private boolean closed = false;
-
-    public boolean isClosed() {
-      return closed;
-    }
-
-    @Override
-    public void close() {
-      closed = true;
-    }
-  }
-
-  @Test
-  public void testResourcesAreInitiallyNotClosed() {
-    CloseableThreadLocal<AutoCloseableStub> closeableThreadLocal =
-            new CloseableThreadLocal<>(AutoCloseableStub::new, 
AutoCloseableStub::close, 1);
-
-    assertThat(closeableThreadLocal.get().isClosed(), is(false));
-  }
-
-  @Test
-  public void testAfterCallingCloseAllInstancesAreClosed() throws 
ExecutionException, InterruptedException {
-    CloseableThreadLocal<AutoCloseableStub> closeableThreadLocal =
-            new CloseableThreadLocal<>(AutoCloseableStub::new, 
AutoCloseableStub::close, 2);
-
-    AutoCloseableStub asyncInstance = 
CompletableFuture.supplyAsync(closeableThreadLocal::get).get();
-    AutoCloseableStub syncInstance = closeableThreadLocal.get();
-
-    closeableThreadLocal.close();
-
-    assertThat(asyncInstance.isClosed(), is(true));
-    assertThat(syncInstance.isClosed(), is(true));
-  }
-
-  @Test
-  public void testSubsequentGetsInTheSameThreadGivesBackTheSameObject() {
-    CloseableThreadLocal<AutoCloseableStub> closeableThreadLocal =
-            new CloseableThreadLocal<>(AutoCloseableStub::new, 
AutoCloseableStub::close, 2);
-
-    AutoCloseableStub ref1 = closeableThreadLocal.get();
-    AutoCloseableStub ref2 = closeableThreadLocal.get();
-    assertThat(ref1, is(ref2));
-  }
-
-  @Test
-  public void testDifferentThreadsHasDifferentInstancesOfTheResource() throws 
ExecutionException, InterruptedException {
-    CloseableThreadLocal<AutoCloseableStub> closeableThreadLocal =
-            new CloseableThreadLocal<>(AutoCloseableStub::new, 
AutoCloseableStub::close, 2);
-
-    AutoCloseableStub asyncInstance = 
CompletableFuture.supplyAsync(closeableThreadLocal::get).get();
-    AutoCloseableStub syncInstance = closeableThreadLocal.get();
-    assertThat(asyncInstance, is(not(syncInstance)));
-  }
-}
\ No newline at end of file
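
CloseableThreadLocal is deleted by this same commit but its body is not shown in this hunk, so the sketch below captures only the contract the tests above pin down: instances are created lazily per thread, repeat get() calls on one thread return the same object, different threads get different objects, and a single close() releases every instance. The constructor shape (factory, closer, expected thread count) is copied from the test calls; everything inside the class is an assumption.

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.function.Consumer;
    import java.util.function.Supplier;

    // Pattern sketch; not the deleted implementation.
    class CloseableThreadLocalSketch<T> implements AutoCloseable {
        private final Queue<T> allInstances = new ConcurrentLinkedQueue<>();
        private final Consumer<T> closer;
        private final ThreadLocal<T> delegate;

        CloseableThreadLocalSketch(Supplier<T> factory, Consumer<T> closer, int expectedThreads) {
            // expectedThreads is only a sizing hint in the real class; unused in this sketch
            this.closer = closer;
            this.delegate = ThreadLocal.withInitial(() -> {
                T value = factory.get();
                allInstances.add(value); // track it so close() can reach instances owned by other threads
                return value;
            });
        }

        T get() {
            return delegate.get();
        }

        @Override
        public void close() {
            allInstances.forEach(closer); // closes every per-thread instance, as the second test expects
            allInstances.clear();
        }
    }
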
diff --git 
a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java
 
b/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java
deleted file mode 100644
index 2064baa544c..00000000000
--- 
a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import static org.hamcrest.CoreMatchers.allOf;
-import static org.hamcrest.CoreMatchers.hasItem;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.CoreMatchers.nullValue;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.core.StringContains.containsString;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.attribute.PosixFilePermission;
-import java.nio.file.attribute.PosixFilePermissions;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
-import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
-import org.apache.hadoop.hive.metastore.txn.TxnStore;
-import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.txn.compactor.Worker;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-
-public class TestPreUpgradeTool {
-  private static final String TEST_DATA_DIR = new 
File(System.getProperty("java.io.tmpdir") +
-      File.separator + TestPreUpgradeTool.class.getCanonicalName() + "-" + 
System.currentTimeMillis()
-  ).getPath().replaceAll("\\\\", "/");
-
-  private String getTestDataDir() {
-    return TEST_DATA_DIR;
-  }
-
-  /**
-   * preUpgrade: test tables that need to be compacted, waits for compaction
-   * postUpgrade: generates scripts w/o asserts
-   */
-  @Test
-  public void testUpgrade() throws Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-    int[][] dataPart = {{1, 2, 10}, {3, 4, 11}, {5, 6, 12}};
-    runStatementOnDriver("drop table if exists TAcid");
-    runStatementOnDriver("drop table if exists TAcidPart");
-    runStatementOnDriver("drop table if exists TFlat");
-    runStatementOnDriver("drop table if exists TFlatText");
-
-    try {
-      runStatementOnDriver(
-        "create table TAcid (a int, b int) clustered by (b) into 2 buckets 
stored as orc TBLPROPERTIES ('transactional'='true')");
-      runStatementOnDriver(
-        "create table TAcidPart (a int, b int) partitioned by (p tinyint)  
clustered by (b) into 2 buckets  stored" +
-          " as orc TBLPROPERTIES ('transactional'='true')");
-      //on 2.x these are guaranteed to not be acid
-      runStatementOnDriver("create table TFlat (a int, b int) stored as orc 
tblproperties('transactional'='false')");
-      runStatementOnDriver(
-        "create table TFlatText (a int, b int) stored as textfile 
tblproperties('transactional'='false')");
-
-
-      //this needs major compaction
-      runStatementOnDriver("insert into TAcid" + makeValuesClause(data));
-      runStatementOnDriver("update TAcid set a = 1 where b = 2");
-
-      //this table needs to be converted to CRUD Acid
-      runStatementOnDriver("insert into TFlat" + makeValuesClause(data));
-
-      //this table needs to be converted to MM
-      runStatementOnDriver("insert into TFlatText" + makeValuesClause(data));
-
-      //p=10 needs major compaction
-      runStatementOnDriver("insert into TAcidPart partition(p)" + 
makeValuesClause(dataPart));
-      runStatementOnDriver("update TAcidPart set a = 1 where b = 2 and p = 
10");
-
-      //todo: add partitioned table that needs conversion to MM/Acid
-
-      //todo: rename files case
-      String[] args = {"-location", getTestDataDir(), "-execute"};
-      PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
-        @Override
-        void onWaitForCompaction() throws MetaException {
-          runWorker(hiveConf);
-        }
-      };
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      PreUpgradeTool.main(args);
-
-      String[] scriptFiles = getScriptFiles();
-      assertThat(scriptFiles.length, is(1));
-
-      List<String> scriptContent = loadScriptContent(new 
File(getTestDataDir(), scriptFiles[0]));
-      assertThat(scriptContent.size(), is(2));
-      assertThat(scriptContent, hasItem(is("ALTER TABLE default.tacid COMPACT 
'major';")));
-      assertThat(scriptContent, hasItem(is("ALTER TABLE default.tacidpart 
PARTITION(p=10Y) COMPACT 'major';")));
-
-      TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
-
-      ShowCompactResponse resp = txnHandler.showCompact(new 
ShowCompactRequest());
-      Assert.assertEquals(2, resp.getCompactsSize());
-      for (ShowCompactResponseElement e : resp.getCompacts()) {
-        Assert.assertEquals(e.toString(), TxnStore.CLEANING_RESPONSE, 
e.getState());
-      }
-
-      // Check whether compaction was successful in the first run
-      File secondRunDataDir = new File(getTestDataDir(), "secondRun");
-      if (!secondRunDataDir.exists()) {
-        if (!secondRunDataDir.mkdir()) {
-          throw new IOException("Unable to create directory" + 
secondRunDataDir.getAbsolutePath());
-        }
-      }
-      String[] args2 = {"-location", secondRunDataDir.getAbsolutePath()};
-      PreUpgradeTool.main(args2);
-
-      scriptFiles = secondRunDataDir.list();
-      assertThat(scriptFiles, is(not(nullValue())));
-      assertThat(scriptFiles.length, is(0));
-
-    } finally {
-      runStatementOnDriver("drop table if exists TAcid");
-      runStatementOnDriver("drop table if exists TAcidPart");
-      runStatementOnDriver("drop table if exists TFlat");
-      runStatementOnDriver("drop table if exists TFlatText");
-    }
-  }
-
-  private static final String INCLUDE_DATABASE_NAME ="DInclude";
-  private static final String EXCLUDE_DATABASE_NAME ="DExclude";
-
-  @Test
-  public void testOnlyFilteredDatabasesAreUpgradedWhenRegexIsGiven() throws 
Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-    runStatementOnDriver("drop database if exists " + INCLUDE_DATABASE_NAME + 
" cascade");
-    runStatementOnDriver("drop database if exists " + EXCLUDE_DATABASE_NAME + 
" cascade");
-
-    try {
-      runStatementOnDriver("create database " + INCLUDE_DATABASE_NAME);
-      runStatementOnDriver("use " + INCLUDE_DATABASE_NAME);
-      runStatementOnDriver("create table " + INCLUDE_TABLE_NAME + " (a int, b 
int) clustered by (b) " +
-              "into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true')");
-      runStatementOnDriver("insert into " + INCLUDE_TABLE_NAME + 
makeValuesClause(data));
-      runStatementOnDriver("update " + INCLUDE_TABLE_NAME + " set a = 1 where 
b = 2");
-
-      runStatementOnDriver("create database " + EXCLUDE_DATABASE_NAME);
-      runStatementOnDriver("use " + EXCLUDE_DATABASE_NAME);
-      runStatementOnDriver("create table " + EXCLUDE_DATABASE_NAME + " (a int, 
b int) clustered by (b) " +
-                "into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true')");
-      runStatementOnDriver("insert into " + EXCLUDE_DATABASE_NAME + 
makeValuesClause(data));
-      runStatementOnDriver("update " + EXCLUDE_DATABASE_NAME + " set a = 1 
where b = 2");
-
-      String[] args = {"-location", getTestDataDir(), "-dbRegex", "*include*"};
-      PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
-        @Override
-        void onWaitForCompaction() throws MetaException {
-          runWorker(hiveConf);
-        }
-      };
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      PreUpgradeTool.main(args);
-
-      String[] scriptFiles = getScriptFiles();
-      assertThat(scriptFiles.length, is(1));
-
-      List<String> scriptContent = loadScriptContent(new 
File(getTestDataDir(), scriptFiles[0]));
-      assertThat(scriptContent.size(), is(1));
-      assertThat(scriptContent.get(0), is("ALTER TABLE dinclude.tinclude 
COMPACT 'major';"));
-
-    } finally {
-      runStatementOnDriver("drop database if exists " + INCLUDE_DATABASE_NAME 
+ " cascade");
-      runStatementOnDriver("drop database if exists " + EXCLUDE_DATABASE_NAME 
+ " cascade");
-    }
-  }
-
-  private static final String INCLUDE_TABLE_NAME ="TInclude";
-  private static final String EXCLUDE_TABLE_NAME ="TExclude";
-
-  @Test
-  public void testOnlyFilteredTablesAreUpgradedWhenRegexIsGiven() throws 
Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-    runStatementOnDriver("drop table if exists " + INCLUDE_TABLE_NAME);
-    runStatementOnDriver("drop table if exists " + EXCLUDE_TABLE_NAME);
-
-    try {
-      runStatementOnDriver("create table " + INCLUDE_TABLE_NAME + " (a int, b 
int) clustered by (b) " +
-              "into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true')");
-      runStatementOnDriver("create table " + EXCLUDE_TABLE_NAME + " (a int, b 
int) clustered by (b) " +
-              "into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true')");
-
-      runStatementOnDriver("insert into " + INCLUDE_TABLE_NAME + 
makeValuesClause(data));
-      runStatementOnDriver("update " + INCLUDE_TABLE_NAME + " set a = 1 where 
b = 2");
-
-      runStatementOnDriver("insert into " + EXCLUDE_TABLE_NAME + 
makeValuesClause(data));
-      runStatementOnDriver("update " + EXCLUDE_TABLE_NAME + " set a = 1 where 
b = 2");
-
-      String[] args = {"-location", getTestDataDir(), "-tableRegex", 
"*include*"};
-      PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
-        @Override
-        void onWaitForCompaction() throws MetaException {
-          runWorker(hiveConf);
-        }
-      };
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      PreUpgradeTool.main(args);
-
-      String[] scriptFiles = getScriptFiles();
-      assertThat(scriptFiles.length, is(1));
-
-      List<String> scriptContent = loadScriptContent(new 
File(getTestDataDir(), scriptFiles[0]));
-      assertThat(scriptContent.size(), is(1));
-      assertThat(scriptContent.get(0), allOf(
-              containsString("ALTER TABLE"),
-              containsString(INCLUDE_TABLE_NAME.toLowerCase()),
-              containsString("COMPACT")));
-
-    } finally {
-      runStatementOnDriver("drop table if exists " + INCLUDE_TABLE_NAME);
-      runStatementOnDriver("drop table if exists " + EXCLUDE_TABLE_NAME);
-    }
-  }
-
-  private String[] getScriptFiles() {
-    File testDataDir = new File(getTestDataDir());
-    String[] scriptFiles = testDataDir.list((dir, name) -> 
name.startsWith("compacts_") && name.endsWith(".sql"));
-    assertThat(scriptFiles, is(not(nullValue())));
-    return scriptFiles;
-  }
-
-  private List<String> loadScriptContent(File file) throws IOException {
-    List<String> content = org.apache.commons.io.FileUtils.readLines(file);
-    content.removeIf(line -> line.startsWith("--"));
-    content.removeIf(StringUtils::isBlank);
-    return content;
-  }
-
-  @Test
-  public void testUpgradeExternalTableNoReadPermissionForDatabase() throws 
Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-
-    runStatementOnDriver("drop database if exists test cascade");
-    runStatementOnDriver("drop table if exists TExternal");
-
-    runStatementOnDriver("create database test");
-    runStatementOnDriver("create table test.TExternal (a int, b int) stored as 
orc tblproperties" +
-      "('transactional'='false')");
-
-    //this needs major compaction
-    runStatementOnDriver("insert into test.TExternal" + 
makeValuesClause(data));
-
-    String dbDir = getWarehouseDir() + "/test.db";
-    File dbPath = new File(dbDir);
-    try {
-      Set<PosixFilePermission> perms = 
PosixFilePermissions.fromString("-w-------");
-      Files.setPosixFilePermissions(dbPath.toPath(), perms);
-      String[] args = {"-location", getTestDataDir(), "-execute"};
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      Exception expected = null;
-      try {
-        PreUpgradeTool.main(args);
-      } catch (Exception e) {
-        expected = e;
-      }
-
-      Assert.assertNotNull(expected);
-      Assert.assertTrue(expected instanceof HiveException);
-      Assert.assertTrue(expected.getMessage().contains("Pre-upgrade tool 
requires " +
-        "read-access to databases and tables to determine if a table has to be 
compacted."));
-    } finally {
-      Set<PosixFilePermission> perms = 
PosixFilePermissions.fromString("rwxrw----");
-      Files.setPosixFilePermissions(dbPath.toPath(), perms);
-    }
-  }
-
-  @Test
-  public void testUpgradeExternalTableNoReadPermissionForTable() throws 
Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-    runStatementOnDriver("drop table if exists TExternal");
-
-    runStatementOnDriver("create table TExternal (a int, b int) stored as orc 
tblproperties('transactional'='false')");
-
-    //this needs major compaction
-    runStatementOnDriver("insert into TExternal" + makeValuesClause(data));
-
-    String tableDir = getWarehouseDir() + "/texternal";
-    File tablePath = new File(tableDir);
-    try {
-      Set<PosixFilePermission> perms = 
PosixFilePermissions.fromString("-w-------");
-      Files.setPosixFilePermissions(tablePath.toPath(), perms);
-      String[] args = {"-location", getTestDataDir(), "-execute"};
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      Exception expected = null;
-      try {
-        PreUpgradeTool.main(args);
-      } catch (Exception e) {
-        expected = e;
-      }
-
-      Assert.assertNotNull(expected);
-      Assert.assertTrue(expected instanceof HiveException);
-      Assert.assertTrue(expected.getMessage().contains("Pre-upgrade tool 
requires" +
-        " read-access to databases and tables to determine if a table has to 
be compacted."));
-    } finally {
-      Set<PosixFilePermission> perms = 
PosixFilePermissions.fromString("rwxrw----");
-      Files.setPosixFilePermissions(tablePath.toPath(), perms);
-    }
-  }
-
-  @Test
-  public void testConcurrency() throws Exception {
-    int numberOfTables = 20;
-    String tablePrefix = "concurrency_";
-
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10},
-        {11, 12}, {13, 14}, {15, 16}, {17, 18}, {19, 20}};
-    for (int i = 0; i < numberOfTables; i++) {
-      runStatementOnDriver("drop table if exists " + tablePrefix + i);
-    }
-
-    try {
-      for (int i = 0; i < numberOfTables; i++) {
-        String tableName = tablePrefix + i;
-        runStatementOnDriver(
-                "create table " + tableName + " (a int, b int) " +
-                        "clustered by (b) " +
-                        "into 10 buckets " +
-                        "stored as orc TBLPROPERTIES 
('transactional'='true')");
-        runStatementOnDriver("insert into " + tableName + 
makeValuesClause(data));
-      }
-
-      String[] args = {"-location", getTestDataDir(), "-execute"};
-      PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
-        @Override
-        void onWaitForCompaction() throws MetaException {
-          runWorker(hiveConf);
-        }
-      };
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      PreUpgradeTool.main(args);
-
-    } finally {
-      for (int i = 0; i < numberOfTables; i++) {
-        runStatementOnDriver("drop table if exists " + tablePrefix + i);
-      }
-    }
-  }
-
-  private static void runWorker(HiveConf hiveConf) throws MetaException {
-    AtomicBoolean stop = new AtomicBoolean(true);
-    Worker t = new Worker();
-    t.setThreadId((int) t.getId());
-    t.setHiveConf(hiveConf);
-    AtomicBoolean looped = new AtomicBoolean();
-    t.init(stop, looped);
-    t.run();
-  }
-
-  private static String makeValuesClause(int[][] rows) {
-    assert rows.length > 0;
-    StringBuilder sb = new StringBuilder(" values");
-    for(int[] row : rows) {
-      assert row.length > 0;
-      if(row.length > 1) {
-        sb.append("(");
-      }
-      for(int value : row) {
-        sb.append(value).append(",");
-      }
-      sb.setLength(sb.length() - 1);//remove trailing comma
-      if(row.length > 1) {
-        sb.append(")");
-      }
-      sb.append(",");
-    }
-    sb.setLength(sb.length() - 1);//remove trailing comma
-    return sb.toString();
-  }
-
-  private List<String> runStatementOnDriver(String stmt) throws Exception {
-    CommandProcessorResponse cpr = d.run(stmt);
-    if(cpr.getResponseCode() != 0) {
-      throw new RuntimeException(stmt + " failed: " + cpr);
-    }
-    List<String> rs = new ArrayList<String>();
-    d.getResults(rs);
-    return rs;
-  }
-  @Before
-  public void setUp() throws Exception {
-    setUpInternal();
-  }
-  private void initHiveConf() {
-    hiveConf = new HiveConf(this.getClass());
-  }
-  @Rule
-  public TestName testName = new TestName();
-  private HiveConf hiveConf;
-  private Driver d;
-  private void setUpInternal() throws Exception {
-    initHiveConf();
-    TxnDbUtil.cleanDb();//todo: api changed in 3.0
-    FileUtils.deleteDirectory(new File(getTestDataDir()));
-
-    Path workDir = new Path(System.getProperty("test.tmp.dir",
-        "target" + File.separator + "test" + File.separator + "tmp"));
-    hiveConf.set("mapred.local.dir", workDir + File.separator + 
this.getClass().getSimpleName()
-        + File.separator + "mapred" + File.separator + "local");
-    hiveConf.set("mapred.system.dir", workDir + File.separator + 
this.getClass().getSimpleName()
-        + File.separator + "mapred" + File.separator + "system");
-    hiveConf.set("mapreduce.jobtracker.staging.root.dir", workDir + 
File.separator + this.getClass().getSimpleName()
-        + File.separator + "mapred" + File.separator + "staging");
-    hiveConf.set("mapred.temp.dir", workDir + File.separator + 
this.getClass().getSimpleName()
-        + File.separator + "mapred" + File.separator + "temp");
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, 
getWarehouseDir());
-    hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, 
HiveInputFormat.class.getName());
-    hiveConf
-        .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
-            
"org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
-    hiveConf
-      .setVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
-        
"org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener");
-    hiveConf
-      .setVar(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER,
-        
"org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider");
-    hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, 
true);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
-    TxnDbUtil.setConfValues(hiveConf);
-    TxnDbUtil.prepDb();//todo: api changed in 3.0
-    File f = new File(getWarehouseDir());
-    if (f.exists()) {
-      FileUtil.fullyDelete(f);
-    }
-    if (!(new File(getWarehouseDir()).mkdirs())) {
-      throw new RuntimeException("Could not create " + getWarehouseDir());
-    }
-    SessionState ss = SessionState.start(hiveConf);
-    ss.applyAuthorizationPolicy();
-    d = new Driver(new QueryState(hiveConf), null);
-    d.setMaxRows(10000);
-  }
-  private String getWarehouseDir() {
-    return getTestDataDir() + "/warehouse";
-  }
-  @After
-  public void tearDown() throws Exception {
-    if (d != null) {
-      d.close();
-      d.destroy();
-      d = null;
-    }
-  }
-
-}
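
The makeValuesClause() helper above builds the literal rows the tests insert. A behaviourally equivalent re-expression with java.util.StringJoiner is shown below purely as an illustration; like the deleted helper, it leaves single-column rows unparenthesised, and the class name is invented for the sketch.

    import java.util.StringJoiner;

    // Behaviourally equivalent sketch of the deleted test helper.
    final class ValuesClauseSketch {
        static String makeValuesClause(int[][] rows) {
            StringJoiner values = new StringJoiner(",", " values", "");
            for (int[] row : rows) {
                StringJoiner cols = new StringJoiner(",",
                        row.length > 1 ? "(" : "", row.length > 1 ? ")" : "");
                for (int v : row) {
                    cols.add(Integer.toString(v));
                }
                values.add(cols.toString());
            }
            return values.toString();
        }

        public static void main(String[] args) {
            // prints " values(1,2),(3,4),(5,6)", the clause appended to the INSERT statements above
            System.out.println(makeValuesClause(new int[][]{{1, 2}, {3, 4}, {5, 6}}));
        }
    }
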
diff --git 
a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestRunOptions.java
 
b/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestRunOptions.java
deleted file mode 100644
index 8005b5cbc27..00000000000
--- 
a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestRunOptions.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import static 
org.apache.hadoop.hive.upgrade.acid.PreUpgradeTool.createCommandLineOptions;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-
-import org.apache.commons.cli.GnuParser;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-public class TestRunOptions {
-
-  @Rule
-  public ExpectedException expectedEx = ExpectedException.none();
-
-  @Test
-  public void testTablePoolSizeIs5WhenSpecified() throws Exception {
-    String[] args = {"-tablePoolSize", "5"};
-    RunOptions runOptions = RunOptions.fromCommandLine(new 
GnuParser().parse(createCommandLineOptions(), args));
-    assertThat(runOptions.getTablePoolSize(), is(5));
-  }
-
-  @Test
-  public void testExceptionIsThrownWhenTablePoolSizeIsNotANumber() throws 
Exception {
-    expectedEx.expect(IllegalArgumentException.class);
-    expectedEx.expectMessage("Please specify a positive integer option value 
for tablePoolSize");
-
-    String[] args = {"-tablePoolSize", "notANumber"};
-    RunOptions.fromCommandLine(new 
GnuParser().parse(createCommandLineOptions(), args));
-  }
-
-  @Test
-  public void testExceptionIsThrownWhenTablePoolSizeIsLessThan1() throws 
Exception {
-    expectedEx.expect(IllegalArgumentException.class);
-    expectedEx.expectMessage("Please specify a positive integer option value 
for tablePoolSize");
-
-    String[] args = {"-tablePoolSize", "0"};
-    RunOptions.fromCommandLine(new 
GnuParser().parse(createCommandLineOptions(), args));
-  }
-
-  @Test
-  public void testExceptionIsThrownWhenTablePoolSizeIsNotInteger() throws 
Exception {
-    expectedEx.expect(IllegalArgumentException.class);
-    expectedEx.expectMessage("Please specify a positive integer option value 
for tablePoolSize");
-
-    String[] args = {"-tablePoolSize", "0.5"};
-    RunOptions.fromCommandLine(new 
GnuParser().parse(createCommandLineOptions(), args));
-  }
-}
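
All four tests above exercise the same validation path in RunOptions: the raw -tablePoolSize value must parse as an int and be at least 1. A stand-alone restatement of that check is sketched below; the class and method names are illustrative, and only the error message text is taken from the code earlier in this diff.

    // Illustrative restatement of the tablePoolSize validation the tests pin down.
    final class TablePoolSizeValidationSketch {
        static int parseTablePoolSize(String raw) {
            int value;
            try {
                value = Integer.parseInt(raw); // "notANumber" and "0.5" both fail here
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException(
                        "Please specify a positive integer option value for tablePoolSize", e);
            }
            if (value < 1) { // "0" fails here
                throw new IllegalArgumentException(
                        "Please specify a positive integer option value for tablePoolSize");
            }
            return value;
        }

        public static void main(String[] args) {
            System.out.println(parseTablePoolSize("5")); // 5
        }
    }
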

