gjacoby126 commented on a change in pull request #520: PHOENIX-5333: A tool to upgrade existing tables/indexes to use self-c…
URL: https://github.com/apache/phoenix/pull/520#discussion_r296924355
 
 

 ##########
 File path: phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java
 ##########
 @@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.mapreduce.index;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.Configured;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.hbase.index.Indexer;
+import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder;
+import org.apache.phoenix.index.GlobalIndexChecker;
+import org.apache.phoenix.index.PhoenixIndexBuilder;
+import org.apache.phoenix.index.PhoenixIndexCodec;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.PhoenixRuntime;
+
+import java.util.logging.Logger;
+import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.logging.FileHandler;
+import java.util.logging.SimpleFormatter;
+
+public class IndexUpgradeTool extends Configured {
+
+    private static final Logger LOGGER = Logger.getLogger(IndexUpgradeTool.class.getName());
+
+    private static final Option OPERATION_OPTION = new Option("o", "operation",
+            true,
+            "[Required]Operation to perform (upgrade/rollback)");
+    private static final Option TABLE_OPTION = new Option("tb", "table", true,
+            "[Required]Tables and indexes list ex. 
table1:index1,index2,index3;"
+                    + "table2:index4,index5");
+    private static final Option TABLE_CSV_FILE_OPTION = new Option("f", "file",
+            true,
+            "[Optional]Tables and indexes list in a csv file");
+    private static final Option DRY_RUN_OPTION = new Option("d", "dry-run",
+            false,
+            "[Optional]If passed this will output steps that will be 
executed");
+    private static final Option HELP_OPTION = new Option("h", "help",
+            false, "Help");
+    private static final Option LOG_FILE_OPTION = new Option("lf", "logfile",
+            true,
+            "Log file path where the logs are written");
+    private static final Option HOST_OPTION = new Option("ho", "host",
+            true, "[Required]zookeeper quorum host");
+    private static final Option PORT_OPTION = new Option("po", "port",
+            true, "[Required]zookeeper port");
+    private static final Option COMPACTION_OPTION = new Option("c", "compaction",
+            true, "[Optional]enable/disable compaction will be handled by the tool");
+
+    public static final String UPGRADE_OP = "upgrade";
+    public static final String ROLLBACK_OP = "rollback";
+
+
+    private HashMap <String, Boolean> tableMC = new HashMap<>();
+    private HashMap<String, ArrayList<String>> tablesAndIndexes;
+    private HashMap<String, String> prop = new  HashMap<>();
+
+    private boolean dryRun, upgrade, compaction;
+    private String connectionURL;
+    private String operation;
+    private String inputTables;
+    private String logFile;
+    private String inputFile;
+
+    public void setDryRun(boolean dryRun) {
+        this.dryRun = dryRun;
+    }
+
+    public void setInputTables(String inputTables) {
+        this.inputTables = inputTables;
+    }
+
+    public void setLogFile(String logFile) {
+        this.logFile = logFile;
+    }
+
+    public void setInputFile(String inputFile) {
+        this.inputFile = inputFile;
+    }
+
+    public boolean getDryRun() {
+        return this.dryRun;
+    }
+
+    public String getInputTables() {
+        return this.inputTables;
+    }
+
+    public String getLogFile() {
+        return this.logFile;
+    }
+
+    public String getOperation() {
+        return operation;
+    }
+
+    public String getConnectionURL() {
+        return connectionURL;
+    }
+
+    public boolean getCompaction() {
+        return compaction;
+    }
+
+    public static void main (String[] args) {
+        CommandLine cmdLine = null;
+
+        IndexUpgradeTool iut = new IndexUpgradeTool();
+        try {
+            cmdLine = iut.parseOptions(args);
+            LOGGER.info("Index Upgrade tool initiated: "+ String.join(",", 
args));
+        } catch (IllegalStateException e) {
+            iut.printHelpAndExit(e.getMessage(), iut.getOptions());
+        }
+        iut.initializeTool(cmdLine);
+        iut.prepareToolSetup();
+        iut.executeTool();
+    }
+
+    public IndexUpgradeTool(String mode, String tables, String inputFile, String host, String port,
+            String outputFile, boolean dryRun, boolean compaction) {
+        this.operation = mode;
+        this.inputTables = tables;
+        this.inputFile = inputFile;
+        this.connectionURL = "jdbc:phoenix:" + host + ":" + port;
 
 Review comment:
   Any reason we can't use the ZK quorum and ConnectionUtil.getInputConnection? That lets us use the existing config settings in hbase-site.xml and the existing JDBC connection string building logic.
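   For illustration, roughly something like the sketch below (not the PR's code; it assumes ConnectionUtil.getInputConnection(Configuration) from org.apache.phoenix.mapreduce.util is on the tool's classpath, and the class/method names here are just placeholders):

   ```java
   import java.sql.Connection;
   import java.sql.SQLException;
   import org.apache.hadoop.conf.Configuration;
   import org.apache.hadoop.hbase.HBaseConfiguration;
   import org.apache.phoenix.mapreduce.util.ConnectionUtil;

   public class ConnectionSketch {
       static Connection openUpgradeConnection() throws SQLException {
           // HBaseConfiguration.create() picks up hbase-site.xml from the classpath,
           // so the ZK quorum/port/znode come from the existing cluster config.
           Configuration conf = HBaseConfiguration.create();
           // ConnectionUtil builds the Phoenix JDBC URL from that Configuration
           // instead of concatenating "jdbc:phoenix:" + host + ":" + port by hand.
           return ConnectionUtil.getInputConnection(conf);
       }
   }
   ```

   That way --host/--port could become optional overrides on top of whatever hbase-site.xml already provides, rather than required options.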

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services