catalin-luca commented on a change in pull request #2002:
URL: https://github.com/apache/hbase/pull/2002#discussion_r460190107
##########
File path: hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFilesJob.java
##########
@@ -0,0 +1,348 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mapreduce;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.ArrayWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.*;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.*;
+
+public class LoadIncrementalHFilesJob extends Configured implements Tool {
+
+  private static final Log LOG = LogFactory.getLog(LoadIncrementalHFilesJob.class);
+
+  public static final String ROOT_DIR = "loadincrementalhfilesjob.root.dir";
+  public static final String TABLE_NAME = "loadincrementalhfilesjob.table.name";
+  public static final String MAX_MAP_TASK = "loadincrementalhfilesjob.max.map.tasks";
+  public static final String DEPTH = "loadincrementalhfilesjob.depth";
+
+  @Override public int run(String[] args) throws Exception {
+    Configuration configuration = HBaseConfiguration.create(getConf());
+
+    return run(args[0], args[1], configuration);
+  }
+
+  @VisibleForTesting public int run(String rootPath, String table, Configuration configuration)
+      throws Exception {
+    Path basePath = new Path(rootPath);
+    configuration.set(TABLE_NAME, table);
+    configuration.set(ROOT_DIR, basePath.toString());
+
+    if (!bulkload(basePath, table, configuration)) {
+      return 1;
+    }
+
+    return 0;
+  }
+
+  private boolean bulkload(Path rootDir, String table, Configuration configuration)
+      throws Exception {
+    Configuration jobConfiguration = new Configuration(configuration);
+
+    applyNonOverridableOptions(jobConfiguration);
+
+    Job job = Job.getInstance(jobConfiguration, "Bulkload-" + table + "-" + rootDir.toString());
+
+    TableMapReduceUtil.addDependencyJars(job);
+
+    job.setJarByClass(LoadIncrementalHFilesJob.class);
+
+    job.setInputFormatClass(BulkLoadInputFormat.class);
+    job.setOutputFormatClass(NullOutputFormat.class);
+
+    job.setMapperClass(BulkloadMapper.class);
+    job.setMapOutputKeyClass(NullWritable.class);
+    job.setMapOutputValueClass(NullWritable.class);
+
+    job.setNumReduceTasks(0);
+
+    return job.waitForCompletion(true);
+  }
+
+  private static void applyNonOverridableOptions(Configuration configuration) {

Review comment:

> What if you make BulkloadMapper update the mapper context status after calling loadIncrementalHFiles.doBulkloadFromQueue? I don't feel good about completely disabling timeouts and making them non-configurable; this could lead to YARN pool resources being hijacked permanently until an operator manually intervenes to kill the slow bulkload job.

That's a good point. The `LoadIncrementalHFiles` class would, however, have to be changed to load the files one by one and to re-use the underlying HBase connection. Updating the mapper context only after the call to `loadIncrementalHFiles.doBulkloadFromQueue` would have no effect, because that is the last call in the mapper's lifetime. I'll try to incorporate this change.
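Roughly, this is the shape of mapper I have in mind. It's a sketch only: it assumes `BulkLoadInputFormat` hands the mapper one HFile path per record as a `Text` key, and `loadSingleHFile` is a hypothetical per-file entry point that `LoadIncrementalHFiles` does not have yet.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch only: loadSingleHFile is a hypothetical per-file API, and the mapper
// keeps a single shared Connection open for its whole lifetime instead of
// reconnecting for every file.
public class BulkloadMapper extends Mapper<Text, NullWritable, NullWritable, NullWritable> {

  private Connection connection;          // opened once in setup(), closed in cleanup()
  private LoadIncrementalHFiles loader;   // re-used across map() calls

  @Override
  protected void map(Text hfilePath, NullWritable ignored, Context context)
      throws IOException, InterruptedException {
    Path path = new Path(hfilePath.toString());
    // Load one HFile at a time (hypothetical per-file API)...
    loader.loadSingleHFile(connection, path);
    // ...and report liveness between files, so mapreduce.task.timeout can stay
    // enabled instead of being disabled for the whole job.
    context.setStatus("Loaded " + path);
    context.progress();
  }
}
```

With one record per HFile, each `map()` call ends with a status update, so a slow but live task keeps heartbeating while a genuinely stuck one still times out.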
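Concretely, the guard could be the first thing `map()` does. Extending the sketch above (the counter group and name are illustrative, and `loadSingleHFile` remains hypothetical):

```java
@Override
protected void map(Text hfilePath, NullWritable ignored, Context context)
    throws IOException, InterruptedException {
  Path path = new Path(hfilePath.toString());
  // Retry guard: a previous (failed) attempt may already have bulk-loaded this
  // HFile, in which case the region server moved it into the region directory
  // and the source path no longer exists. The retried attempt skips it instead
  // of failing with file-not-found.
  FileSystem fs = path.getFileSystem(context.getConfiguration());
  if (!fs.exists(path)) {
    LOG.info("Skipping " + path + ", already loaded by a previous attempt");
    context.getCounter("bulkload", "skipped.already.loaded").increment(1); // illustrative counter
    return;
  }
  loader.loadSingleHFile(connection, path);  // hypothetical per-file API, as above
  context.setStatus("Loaded " + path);
  context.progress();
}
```

This makes each map task effectively idempotent at the file level, which is what allows task retries to be re-enabled without redoing work.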
> And how about mapreduce.task.classpath.user.precedence? Is it to make sure this version of LoadIncrementalHFiles gets into the task classpath when running on clusters that didn't have this patch? I'm fine with it, if that's the case; just wanted to confirm.

That's just a setting specific to the setup I used for testing. I'll remove it and let the client specify it, if needed.
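For a client that does need the patched classes to win on an older cluster, opting in at submit time could look something like this sketch (the `SubmitBulkload` wrapper class is made up for illustration):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.ToolRunner;

// Client-side opt-in sketch: the caller sets the classpath precedence flag on
// the configuration it submits with, rather than the job forcing it in
// applyNonOverridableOptions() for everyone.
public final class SubmitBulkload {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("mapreduce.task.classpath.user.precedence", true);
    System.exit(ToolRunner.run(conf, new LoadIncrementalHFilesJob(), args));
  }
}
```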