This is an automated email from the ASF dual-hosted git repository.
kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new adea623 HIVE-24390: Spelling common (#2810) (Josh Soref reviewed by Zoltan Haindrich)
adea623 is described below
commit adea623294fd0a1a1502abc441794a61739cf8dc
Author: Josh Soref <[email protected]>
AuthorDate: Thu Mar 3 07:33:33 2022 -0500
HIVE-24390: Spelling common (#2810) (Josh Soref reviewed by Zoltan Haindrich)
---
common/pom.xml | 2 +-
.../hadoop/hive/common/CompressionUtils.java | 10 +++---
.../org/apache/hadoop/hive/common/JvmMetrics.java | 2 +-
.../apache/hadoop/hive/common/JvmPauseMonitor.java | 2 +-
.../org/apache/hadoop/hive/common/LogUtils.java | 2 +-
.../apache/hadoop/hive/common/jsonexplain/Op.java | 12 +++----
.../hadoop/hive/common/jsonexplain/Stage.java | 34 +++++++++---------
.../hadoop/hive/common/jsonexplain/Vertex.java | 8 ++---
.../apache/hadoop/hive/common/type/Decimal128.java | 6 ++--
.../apache/hadoop/hive/common/type/HiveChar.java | 2 +-
.../hadoop/hive/common/type/SqlMathUtil.java | 2 +-
.../java/org/apache/hadoop/hive/conf/HiveConf.java | 42 +++++++++++-----------
.../java/org/apache/hadoop/hive/ql/ErrorMsg.java | 14 ++++----
.../apache/hive/common/util/HiveStringUtils.java | 10 +++---
.../java/org/apache/hive/http/JMXJsonServlet.java | 2 +-
.../hive/http/Log4j2ConfiguratorServlet.java | 10 +++---
.../java/org/apache/hive/http/ProfileServlet.java | 2 +-
.../hadoop/hive/common/jsonexplain/TestStage.java | 42 +++++++++++-----------
.../hadoop/hive/common/jsonexplain/TestVertex.java | 2 +-
.../hadoop/hive/common/type/TestDecimal128.java | 16 ++++-----
.../hadoop/hive/conf/TestHiveConfRestrictList.java | 2 +-
.../hive/common/util/TestHiveStringUtils.java | 2 +-
.../clientnegative/stats_aggregator_error_2.q.out | 10 +++---
.../clientnegative/stats_publisher_error_1.q.out | 8 ++---
.../clientnegative/stats_publisher_error_2.q.out | 10 +++---
.../llap/stats_aggregator_error_1.q.out | 2 +-
.../llap/stats_publisher_error_1.q.out | 2 +-
.../hadoop/hive/common/metrics/common/Metrics.java | 4 +--
.../metrics/metrics2/JsonFileMetricsReporter.java | 2 +-
29 files changed, 132 insertions(+), 132 deletions(-)
diff --git a/common/pom.xml b/common/pom.xml
index 912de29..f89f273 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -28,7 +28,7 @@
</properties>
<dependencies>
<!-- dependencies are always listed in sorted order by groupId, artifactId -->
- <!-- intra-proect -->
+ <!-- intra-project -->
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-classification</artifactId>
diff --git a/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java b/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java
index a2e90e1..c5c050b 100644
--- a/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/CompressionUtils.java
@@ -117,7 +117,7 @@ public class CompressionUtils {
* @throws IOException
* @throws FileNotFoundException
*
- * @return The {@link List} of {@link File}s with the untared content.
+ * @return The {@link List} of {@link File}s with the untarred content.
* @throws ArchiveException
*/
public static List<File> unTar(final String inputFileName, final String outputDirName)
@@ -136,7 +136,7 @@ public class CompressionUtils {
* @throws IOException
* @throws FileNotFoundException
*
- * @return The {@link List} of {@link File}s with the untared content.
+ * @return The {@link List} of {@link File}s with the untarred content.
* @throws ArchiveException
*/
public static List<File> unTar(final String inputFileName, final String outputDirName,
@@ -145,7 +145,7 @@ public class CompressionUtils {
File inputFile = new File(inputFileName);
File outputDir = new File(outputDirName);
- final List<File> untaredFiles = new LinkedList<File>();
+ final List<File> untarredFiles = new LinkedList<File>();
InputStream is = null;
try {
@@ -200,10 +200,10 @@ public class CompressionUtils {
IOUtils.copy(debInputStream, outputFileStream);
outputFileStream.close();
}
- untaredFiles.add(outputFile);
+ untarredFiles.add(outputFile);
}
debInputStream.close();
- return untaredFiles;
+ return untarredFiles;
} finally {
if (is != null) is.close();
diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmMetrics.java b/common/src/java/org/apache/hadoop/hive/common/JvmMetrics.java
index b758abe..6edf396 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JvmMetrics.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JvmMetrics.java
@@ -128,7 +128,7 @@ public class JvmMetrics implements MetricsSource {
if (pauseMonitor != null) {
rb.addCounter(GcNumWarnThresholdExceeded,
- pauseMonitor.getNumGcWarnThreadholdExceeded());
+ pauseMonitor.getNumGcWarnThresholdExceeded());
rb.addCounter(GcNumInfoThresholdExceeded,
pauseMonitor.getNumGcInfoThresholdExceeded());
rb.addCounter(GcTotalExtraSleepTime,
diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java b/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
index 3c988da..d98153b 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
@@ -96,7 +96,7 @@ public class JvmPauseMonitor {
return monitorThread != null;
}
- public long getNumGcWarnThreadholdExceeded() {
+ public long getNumGcWarnThresholdExceeded() {
return numGcWarnThresholdExceeded;
}
diff --git a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
index 89c991d..f7571c6 100644
--- a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
@@ -113,7 +113,7 @@ public class LogUtils {
return initHiveLog4jDefault(
conf, "Not able to find conf file: " + log4jConfigFile, confVarName);
} else {
- // property speficied file found in local file system
+ // property specified file found in local file system
// use the specified file
if (confVarName == HiveConf.ConfVars.HIVE_EXEC_LOG4J_FILE) {
String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID);
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
index f7c2908..2bd1963 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Op.java
@@ -93,9 +93,9 @@ public final class Op {
// get the map for posToVertex
Map<String, Vertex> posToVertex = new LinkedHashMap<>();
if (opObject.has("input vertices:")) {
- JSONObject verticeObj = opObject.getJSONObject("input vertices:");
- for (String pos : JSONObject.getNames(verticeObj)) {
- String vertexName = verticeObj.getString(pos);
+ JSONObject vertexObj = opObject.getJSONObject("input vertices:");
+ for (String pos : JSONObject.getNames(vertexObj)) {
+ String vertexName = vertexObj.getString(pos);
// update the connection
Connection c = null;
for (Connection connection : vertex.parentConnections) {
@@ -195,7 +195,7 @@ public final class Op {
// should be merge join
else {
Map<String, String> posToOpId = new LinkedHashMap<>();
- if (vertex.mergeJoinDummyVertexs.isEmpty()) {
+ if (vertex.mergeJoinDummyVertices.isEmpty()) {
for (Entry<String, String> entry : vertex.tagToInput.entrySet()) {
Connection c = null;
for (Connection connection : vertex.parentConnections) {
@@ -226,7 +226,7 @@ public final class Op {
}
} else {
posToOpId.put(vertex.tag, this.parent.operatorId);
- for (Vertex v : vertex.mergeJoinDummyVertexs) {
+ for (Vertex v : vertex.mergeJoinDummyVertices) {
if (v.outputOps.size() != 1) {
throw new Exception("Can not find a single root operators in a single vertex " + v.name
+ " when hive explain user is trying to identify the operator id.");
@@ -244,7 +244,7 @@ public final class Op {
}
}
// inline merge join operator in a self-join
- for (Vertex v : this.vertex.mergeJoinDummyVertexs) {
+ for (Vertex v : this.vertex.mergeJoinDummyVertices) {
parser.addInline(this, new Connection(null, v));
}
}
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Stage.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Stage.java
index ec39bd4..f2b3273 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Stage.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Stage.java
@@ -42,7 +42,7 @@ public final class Stage {
public final List<Stage> parentStages = new ArrayList<>();
// downstream stages.
public final List<Stage> childStages = new ArrayList<>();
- public final Map<String, Vertex> vertexs =new LinkedHashMap<>();
+ public final Map<String, Vertex> vertices =new LinkedHashMap<>();
public final Map<String, String> attrs = new TreeMap<>();
Map<Vertex, List<Connection>> tezStageDependency;
// some stage may contain only a single operator, e.g., create table operator,
@@ -81,32 +81,32 @@ public final class Stage {
* @param object
* @throws Exception
* If the object of stage contains "Tez", we need to extract the
- * vertices and edges Else we need to directly extract operators
+ * jsonVertices and edges Else we need to directly extract operators
* and/or attributes.
*/
public void extractVertex(JSONObject object) throws Exception {
if (object.has(this.parser.getFrameworkName())) {
this.tezStageDependency = new TreeMap<>();
JSONObject tez = (JSONObject) object.get(this.parser.getFrameworkName());
- JSONObject vertices = tez.getJSONObject("Vertices:");
+ JSONObject jsonVertices = tez.getJSONObject("Vertices:");
if (tez.has("Edges:")) {
JSONObject edges = tez.getJSONObject("Edges:");
- // iterate for the first time to get all the vertices
+ // iterate for the first time to get all the jsonVertices
for (String to : JSONObject.getNames(edges)) {
- vertexs.put(to, new Vertex(to, vertices.getJSONObject(to), this, parser));
+ vertices.put(to, new Vertex(to, jsonVertices.getJSONObject(to), this, parser));
}
// iterate for the second time to get all the vertex dependency
for (String to : JSONObject.getNames(edges)) {
Object o = edges.get(to);
- Vertex v = vertexs.get(to);
+ Vertex v = vertices.get(to);
// 1 to 1 mapping
if (o instanceof JSONObject) {
JSONObject obj = (JSONObject) o;
String parent = obj.getString("parent");
- Vertex parentVertex = vertexs.get(parent);
+ Vertex parentVertex = vertices.get(parent);
if (parentVertex == null) {
- parentVertex = new Vertex(parent, vertices.getJSONObject(parent), this, parser);
- vertexs.put(parent, parentVertex);
+ parentVertex = new Vertex(parent, jsonVertices.getJSONObject(parent), this, parser);
+ vertices.put(parent, parentVertex);
}
String type = obj.getString("type");
// for union vertex, we reverse the dependency relationship
@@ -126,10 +126,10 @@ public final class Stage {
for (int index = 0; index < from.length(); index++) {
JSONObject obj = from.getJSONObject(index);
String parent = obj.getString("parent");
- Vertex parentVertex = vertexs.get(parent);
+ Vertex parentVertex = vertices.get(parent);
if (parentVertex == null) {
- parentVertex = new Vertex(parent, vertices.getJSONObject(parent), this, parser);
- vertexs.put(parent, parentVertex);
+ parentVertex = new Vertex(parent, jsonVertices.getJSONObject(parent), this, parser);
+ vertices.put(parent, parentVertex);
}
String type = obj.getString("type");
if (!"CONTAINS".equals(type)) {
@@ -146,19 +146,19 @@ public final class Stage {
}
}
} else {
- for (String vertexName : JSONObject.getNames(vertices)) {
- vertexs.put(vertexName, new Vertex(vertexName, vertices.getJSONObject(vertexName), this, parser));
+ for (String vertexName : JSONObject.getNames(jsonVertices)) {
+ vertices.put(vertexName, new Vertex(vertexName, jsonVertices.getJSONObject(vertexName), this, parser));
}
}
// iterate for the first time to extract opTree in vertex
- for (Vertex v : vertexs.values()) {
+ for (Vertex v : vertices.values()) {
if (v.vertexType == VertexType.MAP || v.vertexType == VertexType.REDUCE) {
v.extractOpTree();
}
}
// iterate for the second time to rewrite object
- for (Vertex v : vertexs.values()) {
+ for (Vertex v : vertices.values()) {
v.checkMultiReduceOperator(parser.rewriteObject);
}
} else {
@@ -245,7 +245,7 @@ public final class Stage {
printer.println(DagJsonParser.prefixString(indentFlag) + externalName);
// print vertexes
indentFlag++;
- for (Vertex candidate : this.vertexs.values()) {
+ for (Vertex candidate : this.vertices.values()) {
if (!parser.isInline(candidate) && candidate.children.isEmpty()) {
candidate.print(printer, indentFlag, null, null);
}
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
index 6773912..7771d37 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
@@ -53,7 +53,7 @@ public final class Vertex implements Comparable<Vertex>{
public final List<Op> inputOps= new ArrayList<>();
// we create a dummy vertex for a mergejoin branch for a self join if this
// vertex is a mergejoin
- public final List<Vertex> mergeJoinDummyVertexs = new ArrayList<>();
+ public final List<Vertex> mergeJoinDummyVertices = new ArrayList<>();
// this vertex has multiple reduce operators
public int numReduceOp = 0;
// execution mode
@@ -121,7 +121,7 @@ public final class Vertex implements Comparable<Vertex>{
Vertex v = new Vertex(null, mpOpTree, this.stage, parser);
v.extractOpTree();
v.dummy = true;
- mergeJoinDummyVertexs.add(v);
+ mergeJoinDummyVertices.add(v);
}
} else if (key.equals("Merge File Operator")) {
JSONObject opTree = vertexObject.getJSONObject(key);
@@ -254,7 +254,7 @@ public final class Vertex implements Comparable<Vertex>{
}
}
if (vertexType == VertexType.UNION) {
- // print dependent vertexs
+ // print dependent vertices
indentFlag++;
for (int index = 0; index < this.parentConnections.size(); index++) {
Connection connection = this.parentConnections.get(index);
@@ -274,7 +274,7 @@ public final class Vertex implements Comparable<Vertex>{
for (Op op : this.outputOps) {
if (op.type == OpType.RS) {
if (rewriteObject) {
- Vertex outputVertex = this.stage.vertexs.get(op.outputVertexName);
+ Vertex outputVertex = this.stage.vertices.get(op.outputVertexName);
if (outputVertex != null && outputVertex.inputOps.size() > 0) {
JSONArray array = new JSONArray();
for (Op inputOp : outputVertex.inputOps) {
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java b/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java
index 194fc14..7d4eaf7 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java
@@ -703,11 +703,11 @@ public final class Decimal128 extends Number implements Comparable<Decimal128> {
} else if (str[cursor] == 'e' || str[cursor] == 'E') {
// exponent part
++cursor;
- boolean exponentNagative = false;
+ boolean exponentNegative = false;
if (str[cursor] == '+') {
++cursor;
} else if (str[cursor] == '-') {
- exponentNagative = true;
+ exponentNegative = true;
++cursor;
}
while (cursor < end) {
@@ -718,7 +718,7 @@ public final class Decimal128 extends Number implements Comparable<Decimal128> {
}
++cursor;
}
- if (exponentNagative) {
+ if (exponentNegative) {
exponent = -exponent;
}
} else {
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/HiveChar.java b/common/src/java/org/apache/hadoop/hive/common/type/HiveChar.java
index f4600a3..9b5da77 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/HiveChar.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/HiveChar.java
@@ -22,7 +22,7 @@ import org.apache.commons.lang3.StringUtils;
/**
* HiveChar.
* String values will be padded to full char length.
- * Character legnth, comparison, hashCode should ignore trailing spaces.
+ * Character length, comparison, hashCode should ignore trailing spaces.
*/
public class HiveChar extends HiveBaseChar
implements Comparable<HiveChar> {
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/SqlMathUtil.java b/common/src/java/org/apache/hadoop/hive/common/type/SqlMathUtil.java
index 57ccce0..a1bf859 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/SqlMathUtil.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/SqlMathUtil.java
@@ -275,7 +275,7 @@ public final class SqlMathUtil {
// intrinsics in Java 8)
// an equivalent algorithm exists in
- // com.google.common.primitives.UnsingedLongs
+ // com.google.common.primitives.UnsignedLongs
long quotient = ((dividend >>> 1L) / divisor) << 1L;
long remainder = dividend - quotient * divisor;
if (compareUnsignedLong(remainder, divisor) >= 0) {
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 8879da4..ce8ca7b 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -464,7 +464,7 @@ public class HiveConf extends Configuration {
}
/**
- * Get a set containing configuration parameter names used by LLAP Server isntances
+ * Get a set containing configuration parameter names used by LLAP Server instances
* @return an unmodifiable set containing llap ConfVars
*/
public static final Set<String> getLlapDaemonConfVars() {
@@ -522,7 +522,7 @@ public class HiveConf extends Configuration {
"Root dir for ChangeManager for non encrypted paths if
hive.repl.cmrootdir is encrypted."),
REPLCMINTERVAL("hive.repl.cm.interval","3600s",
new TimeValidator(TimeUnit.SECONDS),
- "Inteval for cmroot cleanup thread."),
+ "Interval for cmroot cleanup thread."),
REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE("hive.repl.ha.datapath.replace.remote.nameservice",
false,
"When HDFS is HA enabled and both source and target clusters are
configured with same nameservice name," +
"enable this flag and provide a new unique logical name
for representing the remote cluster " +
@@ -549,7 +549,7 @@ public class HiveConf extends Configuration {
"If this is set to false, then all previously used
dump-directories will be deleted after repl-dump. " +
"If true, a number of latest dump-directories specified by
hive.repl.retain.prev.dump.dir.count will be retained"),
REPL_RETAIN_PREV_DUMP_DIR_COUNT("hive.repl.retain.prev.dump.dir.count", 3,
- "Indicates maximium number of latest previously used
dump-directories which would be retained when " +
+ "Indicates maximum number of latest previously used
dump-directories which would be retained when " +
"hive.repl.retain.prev.dump.dir is set to true"),
REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET("hive.repl.retain.custom.db.locations.on.target",
true,
"Indicates if source database has custom warehouse locations,
whether that should be retained on target as well"),
@@ -606,7 +606,7 @@ public class HiveConf extends Configuration {
+ "requirements can be fulfilled. If any specific configuration needs
to be passed for these copy task it can"
+ " be specified using the prefix hive.dbpath."),
REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK_PATHS("hive.repl.external.warehouse.single.copy.task.paths",
- "", "Comma seperated list of paths for which single copy task shall be
created for all the external tables "
+ "", "Comma separated list of paths for which single copy task shall be
created for all the external tables "
+ "within the locations Would require more memory for preparing the
initial listing, Should be used if the memory "
+ "requirements can be fulfilled. If the directory contains data not
part of the database, that data would "
+ "also get copied, so only locations which contains tables only
belonging to the same database should be "
@@ -747,7 +747,7 @@ public class HiveConf extends Configuration {
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
"Comma-separated list of hooks to be invoked for each query which can
\n" +
- "tranform the query before it's placed in the job.xml file. Must be a
Java class which \n" +
+ "transform the query before it's placed in the job.xml file. Must be a
Java class which \n" +
"extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract
class."),
CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
"Comma-separated list of statistics publishers to be invoked on
counters on each job. \n" +
@@ -850,7 +850,7 @@ public class HiveConf extends Configuration {
"When hive.exec.mode.local.auto is true, the number of tasks should
less than this for local mode."),
DROP_IGNORES_NON_EXISTENT("hive.exec.drop.ignorenonexistent", true,
- "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a
non-existent table/view/function"),
+ "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a
nonexistent table/view/function"),
HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the
mapjoin hint"),
@@ -2570,7 +2570,7 @@ public class HiveConf extends Configuration {
"Whether to eliminate scans of the tables from which no columns are
selected. Note\n" +
"that, when selecting from empty tables with data files, this can
produce incorrect\n" +
"results, so it's disabled by default. It works correctly for normal
tables."),
- HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Dont scan relations
which are guaranteed to not generate any rows"),
+ HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Don't scan
relations which are guaranteed to not generate any rows"),
HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true,
"Whether to push predicates down to storage handlers"),
HIVEOPTGROUPBY("hive.optimize.groupby", true,
@@ -3694,7 +3694,7 @@ public class HiveConf extends Configuration {
"any extra EXPLAIN configuration (e.g. hive.spark.explain.user, etc.).
The explain " +
"plan for each stage is truncated at 100,000 characters."),
- // prefix used to auto generated column aliases (this should be s,tarted
with '_')
+ // prefix used to auto generated column aliases (this should be started
with '_')
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label",
"_c",
"String used as a prefix when auto generating column alias.\n" +
"By default the prefix label will be appended with a column position
number to form the column alias. \n" +
@@ -3718,7 +3718,7 @@ public class HiveConf extends Configuration {
HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "",
"Reporter implementations for metric class "
+
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" +
- "Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This
configuraiton will be"
+ "Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This
configuration will be"
+ " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES if
present. " +
"Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location",
"/tmp/report.json",
@@ -3746,7 +3746,7 @@ public class HiveConf extends Configuration {
"To hold a lock file in scratchdir to prevent to be removed by
cleardanglingscratchdir"),
HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false,
"Where to insert into multilevel directories like\n" +
- "\"insert directory '/HIVEFT25686/chinna/' from table\""),
+ "\"insert directory '/HIVEFT25686/china/' from table\""),
HIVE_CTAS_EXTERNAL_TABLES("hive.ctas.external.tables", true,
"whether CTAS for external tables is allowed"),
HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
@@ -3814,7 +3814,7 @@ public class HiveConf extends Configuration {
"indicates whether the connection is authenticated before the
requests lands on HiveServer2, So that we can" +
"avoid the authentication is again in HS2. Default value is empty,
if it's value is set to some header say " +
"'X-Trusted-Proxy-Auth-Header' then we need to look for this
header in the connection string, if present " +
- "we directly extarct the client name from header."),
+ "we directly extract the client name from header."),
// HiveServer2 global init file location
HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location",
"${env:HIVE_CONF_DIR}",
@@ -4752,7 +4752,7 @@ public class HiveConf extends Configuration {
+ " 1: recommended value: there is only 1 merger thread
(additionally to the task's main thread),"
+ "according perf tests, this can lead to serious improvement \n"),
TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION("hive.tez.bigtable.minsize.semijoin.reduction",
100000000L,
- "Big table for runtime filteting should be of atleast this size"),
+ "Big table for runtime filtering should be of atleast this size"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD("hive.tez.dynamic.semijoin.reduction.threshold",
(float) 0.50,
"Only perform semijoin optimization if the estimated benefit at or
above this fraction of the target table"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_MULTICOLUMN(
@@ -4893,7 +4893,7 @@ public class HiveConf extends Configuration {
"is unneeded. This is only necessary for ORC files written before
HIVE-9660."),
LLAP_CACHE_HYDRATION_STRATEGY_CLASS("hive.llap.cache.hydration.strategy.class",
"", "Strategy class for managing the "
+ "llap cache hydration. It's executed when the daemon starts and
stops, and gives a chance to save and/or "
- + "load the contens of the llap cache. If left empty the feature is
disabled.\n" +
+ + "load the contents of the llap cache. If left empty the feature is
disabled.\n" +
"The class should implement
org.apache.hadoop.hive.llap.LlapCacheHydration interface."),
LLAP_CACHE_HYDRATION_SAVE_DIR("hive.llap.cache.hydration.save.dir",
"/tmp/hive", "Directory to save the llap cache content\n"
+ "info on shutdown, if BasicLlapCacheHydration is used as the
hive.llap.cache.hydration.strategy.class."),
@@ -4923,7 +4923,7 @@ public class HiveConf extends Configuration {
"inputs into LLAP cache, if this feature is enabled."),
LLAP_IO_ENCODE_SLICE_LRR("hive.llap.io.encode.slice.lrr", true,
"Whether to separate cache slices when reading encoded data from text
inputs via MR\n" +
- "MR LineRecordRedader into LLAP cache, if this feature is enabled.
Safety flag."),
+ "MR LineRecordReader into LLAP cache, if this feature is enabled.
Safety flag."),
LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true,
"Whether to enable time counters for LLAP IO layer (time spent in
HDFS, etc.)"),
LLAP_IO_VRB_QUEUE_LIMIT_MAX("hive.llap.io.vrb.queue.limit.max", 50000,
@@ -5179,25 +5179,25 @@ public class HiveConf extends Configuration {
"Specifies the minimum amount of tasks, executed by a particular LLAP
daemon, before the health\n" +
"status of the node is examined."),
LLAP_NODEHEALTHCHECKS_MININTERVALDURATION(
- "hive.llap.nodehealthckecks.minintervalduration", "300s",
+ "hive.llap.nodehealthchecks.minintervalduration", "300s",
new TimeValidator(TimeUnit.SECONDS),
"The minimum time that needs to elapse between two actions that are the
correcting results of identifying\n" +
"an unhealthy node. Even if additional nodes are considered to be
unhealthy, no action is performed until\n" +
"this time interval has passed since the last corrective action."),
LLAP_NODEHEALTHCHECKS_TASKTIMERATIO(
- "hive.llap.nodehealthckecks.tasktimeratio", 1.5f,
+ "hive.llap.nodehealthchecks.tasktimeratio", 1.5f,
"LLAP daemons are considered unhealthy, if their average (Map-) task
execution time is significantly larger\n" +
"than the average task execution time of other nodes. This value
specifies the ratio of a node to other\n" +
"nodes, which is considered as threshold for unhealthy. A value of 1.5
for example considers a node to be\n" +
"unhealthy if its average task execution time is 50% larger than the
average of other nodes."),
LLAP_NODEHEALTHCHECKS_EXECUTORRATIO(
- "hive.llap.nodehealthckecks.executorratio", 2.0f,
+ "hive.llap.nodehealthchecks.executorratio", 2.0f,
"If an unhealthy node is identified, it is blacklisted only where there
is enough free executors to execute\n" +
"the tasks. This value specifies the ratio of the free executors
compared to the blacklisted ones.\n" +
"A value of 2.0 for example defines that we blacklist an unhealthy node
only if we have 2 times more\n" +
"free executors on the remaining nodes than the unhealthy node."),
LLAP_NODEHEALTHCHECKS_MAXNODES(
- "hive.llap.nodehealthckecks.maxnodes", 1,
+ "hive.llap.nodehealthchecks.maxnodes", 1,
"The maximum number of blacklisted nodes. If there are at least this
number of blacklisted nodes\n" +
"the listener will not blacklist further nodes even if all the
conditions are met."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME("hive.llap.task.scheduler.am.registry",
"llap",
@@ -5242,7 +5242,7 @@ public class HiveConf extends Configuration {
"hive.llap.daemon.task.preemption.metrics.intervals", "30,60,300",
"Comma-delimited set of integers denoting the desired rollover
intervals (in seconds)\n" +
" for percentile latency metrics. Used by LLAP daemon task scheduler
metrics for\n" +
- " time taken to kill task (due to pre-emption) and useful time wasted
by the task that\n" +
+ " time taken to kill task (due to preemption) and useful time wasted
by the task that\n" +
" is about to be preempted."
),
LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE("hive.llap.daemon.task.scheduler.wait.queue.size",
@@ -5622,7 +5622,7 @@ public class HiveConf extends Configuration {
+ "and execution is also bound to the namespace"),
HIVE_SCHEDULED_QUERIES_EXECUTOR_IDLE_SLEEP_TIME("hive.scheduled.queries.executor.idle.sleep.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
- "Time to sleep between quering for the presence of a scheduled query."),
+ "Time to sleep between querying for the presence of a scheduled query."),
HIVE_SCHEDULED_QUERIES_EXECUTOR_PROGRESS_REPORT_INTERVAL("hive.scheduled.queries.executor.progress.report.interval", "60s",
new TimeValidator(TimeUnit.SECONDS),
@@ -5678,7 +5678,7 @@ public class HiveConf extends Configuration {
HIVE_NOTFICATION_EVENT_CONSUMERS("hive.notification.event.consumers",
"org.apache.hadoop.hive.ql.cache.results.QueryResultsCache$InvalidationEventConsumer",
"Comma-separated list of class names extending EventConsumer," +
- "to handle the NotificationEvents retreived by the notification event
poll."),
+ "to handle the NotificationEvents retrieved by the notification event
poll."),
HIVE_DESCRIBE_PARTITIONED_TABLE_IGNORE_STATS("hive.describe.partitionedtable.ignore.stats",
false,
"Disable partitioned table stats collection for 'DESCRIBE FORMATTED'
or 'DESCRIBE EXTENDED' commands."),
diff --git a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 927650f..7f13403 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -146,7 +146,7 @@ public enum ErrorMsg {
UDTF_NO_GROUP_BY(10077, "GROUP BY is not supported with a UDTF in the SELECT clause"),
UDTF_NO_SORT_BY(10078, "SORT BY is not supported with a UDTF in the SELECT clause"),
UDTF_NO_CLUSTER_BY(10079, "CLUSTER BY is not supported with a UDTF in the SELECT clause"),
- UDTF_NO_DISTRIBUTE_BY(10080, "DISTRUBTE BY is not supported with a UDTF in the SELECT clause"),
+ UDTF_NO_DISTRIBUTE_BY(10080, "DISTRIBUTE BY is not supported with a UDTF in the SELECT clause"),
UDTF_INVALID_LOCATION(10081, "UDTF's are not supported outside the SELECT clause, nor nested "
+ "in expressions"),
UDTF_LATERAL_VIEW(10082, "UDTF's cannot be in a select expression when there is a lateral view"),
@@ -218,7 +218,7 @@ public enum ErrorMsg {
BUCKET_MAPJOIN_NOT_POSSIBLE(10136,
"Bucketed mapjoin cannot be performed. " +
"This can be due to multiple reasons: " +
- " . Join columns dont match bucketed columns. " +
+ " . Join columns don't match bucketed columns. " +
" . Number of buckets are not a multiple of each other. " +
"If you really want to perform the operation, either remove the " +
"mapjoin hint from your query or set hive.enforce.bucketmapjoin to
false."),
@@ -523,23 +523,23 @@ public enum ErrorMsg {
STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. " +
"There was a error to retrieve the StatsPublisher, and retrying " +
- "might help. If you dont want the query to fail because accurate
statistics " +
+ "might help. If you don't want the query to fail because accurate
statistics " +
"could not be collected, set hive.stats.reliable=false"),
STATSPUBLISHER_INITIALIZATION_ERROR(30001, "StatsPublisher cannot be
initialized. " +
"There was a error in the initialization of StatsPublisher, and retrying "
+
- "might help. If you dont want the query to fail because accurate
statistics " +
+ "might help. If you don't want the query to fail because accurate
statistics " +
"could not be collected, set hive.stats.reliable=false"),
STATSPUBLISHER_CONNECTION_ERROR(30002, "StatsPublisher cannot be connected
to." +
"There was a error while connecting to the StatsPublisher, and retrying " +
- "might help. If you dont want the query to fail because accurate
statistics " +
+ "might help. If you don't want the query to fail because accurate
statistics " +
"could not be collected, set hive.stats.reliable=false"),
STATSPUBLISHER_PUBLISHING_ERROR(30003, "Error in publishing stats. There was
an " +
"error in publishing stats via StatsPublisher, and retrying " +
- "might help. If you dont want the query to fail because accurate
statistics " +
+ "might help. If you don't want the query to fail because accurate
statistics " +
"could not be collected, set hive.stats.reliable=false"),
STATSPUBLISHER_CLOSING_ERROR(30004, "StatsPublisher cannot be closed." +
"There was a error while closing the StatsPublisher, and retrying " +
- "might help. If you dont want the query to fail because accurate
statistics " +
+ "might help. If you don't want the query to fail because accurate
statistics " +
"could not be collected, set hive.stats.reliable=false"),
COLUMNSTATSCOLLECTOR_INVALID_PART_KEY(30005, "Invalid partitioning key
specified in ANALYZE " +
diff --git a/common/src/java/org/apache/hive/common/util/HiveStringUtils.java b/common/src/java/org/apache/hive/common/util/HiveStringUtils.java
index 55abd03..ee94e41 100644
--- a/common/src/java/org/apache/hive/common/util/HiveStringUtils.java
+++ b/common/src/java/org/apache/hive/common/util/HiveStringUtils.java
@@ -298,7 +298,7 @@ public class HiveStringUtils {
*
* Given a finish and start time in long milliseconds, returns a
* String in the format Xhrs, Ymins, Z sec, for the time difference between two times.
- * If finish time comes before start time then negative valeus of X, Y and Z wil return.
+ * If finish time comes before start time then negative values of X, Y and Z will return.
*
* @param finishTime finish time
* @param startTime start time
@@ -342,7 +342,7 @@ public class HiveStringUtils {
* If finish time is 0, empty string is returned, if start time is 0
* then difference is not appended to return value.
* @param dateFormat date format to use
- * @param finishTime fnish time
+ * @param finishTime finish time
* @param startTime start time
* @return formatted value.
*/
@@ -360,8 +360,8 @@ public class HiveStringUtils {
/**
* Returns an arraylist of strings.
- * @param str the comma seperated string values
- * @return the arraylist of the comma seperated string values
+ * @param str the comma separated string values
+ * @return the arraylist of the comma separated string values
*/
public static String[] getStrings(String str){
Collection<String> values = getStringCollection(str);
@@ -373,7 +373,7 @@ public class HiveStringUtils {
/**
* Returns a collection of strings.
- * @param str comma seperated string values
+ * @param str comma separated string values
* @return an <code>ArrayList</code> of string values
*/
public static Collection<String> getStringCollection(String str){
diff --git a/common/src/java/org/apache/hive/http/JMXJsonServlet.java b/common/src/java/org/apache/hive/http/JMXJsonServlet.java
index ed95084..7ad8af3 100644
--- a/common/src/java/org/apache/hive/http/JMXJsonServlet.java
+++ b/common/src/java/org/apache/hive/http/JMXJsonServlet.java
@@ -114,7 +114,7 @@ import com.fasterxml.jackson.core.JsonGenerator;
*
* The bean's name and modelerType will be returned for all beans.
*
- * Optional paramater "callback" should be used to deliver JSONP response.
+ * Optional parameter "callback" should be used to deliver JSONP response.
*
*/
public class JMXJsonServlet extends HttpServlet {
diff --git a/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
index 7182505..c461ca7 100644
--- a/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
+++ b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java
@@ -67,28 +67,28 @@ import com.fasterxml.jackson.databind.ObjectMapper;
* <li>
* Returns all loggers with levels in JSON format:
* <pre>
- * curl http://hostame:port/conflog
+ * curl http://hostname:port/conflog
* </pre>
* </li>
* <li>
* Set root logger to INFO:
* <pre>
* curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [ { "logger" : "", "level" : "INFO" } ] }'
- * http://hostame:port/conflog
+ * http://hostname:port/conflog
* </pre>
* </li>
* <li>
* Set logger with level:
* <pre>
* curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [
- * { "logger" : "LlapIoOrc", "level" : "INFO" } ] }'
http://hostame:port/conflog
+ * { "logger" : "LlapIoOrc", "level" : "INFO" } ] }'
http://hostname:port/conflog
* </pre>
* </li>
* <li>
* Set log level for all classes under a package:
* <pre>
* curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [
- * { "logger" : "org.apache.orc", "level" : "INFO" } ] }'
http://hostame:port/conflog
+ * { "logger" : "org.apache.orc", "level" : "INFO" } ] }'
http://hostname:port/conflog
* </pre>
* </li>
* <li>
@@ -97,7 +97,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
* curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [ { "logger" : "", "level" : "INFO" },
* { "logger" : "LlapIoOrc", "level" : "WARN" },
* { "logger" : "org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon", "level" : "INFO" },
- * { "logger" : "org.apache.orc", "level" : "INFO" } ] }' http://hostame:port/conflog
+ * { "logger" : "org.apache.orc", "level" : "INFO" } ] }' http://hostname:port/conflog
* </pre>
* </li>
* </ul>
diff --git a/common/src/java/org/apache/hive/http/ProfileServlet.java b/common/src/java/org/apache/hive/http/ProfileServlet.java
index c32c3ec..6da23e6 100644
--- a/common/src/java/org/apache/hive/http/ProfileServlet.java
+++ b/common/src/java/org/apache/hive/http/ProfileServlet.java
@@ -36,7 +36,7 @@ import com.google.common.base.Joiner;
/**
* Servlet that runs async-profiler as web-endpoint.
- * Following options from async-profiler can be specified as query paramater.
+ * Following options from async-profiler can be specified as query parameter.
* // -e event profiling event: cpu|alloc|lock|cache-misses etc.
* // -d duration run profiling for <duration> seconds (integer)
* // -i interval sampling interval in nanoseconds (long)
diff --git a/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestStage.java b/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestStage.java
index 2f21caf..f1660c0 100644
--- a/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestStage.java
+++ b/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestStage.java
@@ -104,8 +104,8 @@ public class TestStage {
JSONObject object = new JSONObject(jsonString);
uut.extractVertex(object);
- assertEquals(1, uut.vertexs.size());
- assertTrue(uut.vertexs.containsKey("v1"));
+ assertEquals(1, uut.vertices.size());
+ assertTrue(uut.vertices.containsKey("v1"));
}
@Test
@@ -116,14 +116,14 @@ public class TestStage {
JSONObject object = new JSONObject(jsonString);
uut.extractVertex(object);
- assertEquals(2, uut.vertexs.size());
- assertTrue(uut.vertexs.containsKey("v1"));
- assertTrue(uut.vertexs.containsKey("v2"));
+ assertEquals(2, uut.vertices.size());
+ assertTrue(uut.vertices.containsKey("v1"));
+ assertTrue(uut.vertices.containsKey("v2"));
- assertEquals(0, uut.vertexs.get("v1").parentConnections.size());
- assertEquals(1, uut.vertexs.get("v2").parentConnections.size());
- assertEquals("v1",
uut.vertexs.get("v2").parentConnections.get(0).from.name);
- assertEquals("TYPE", uut.vertexs.get("v2").parentConnections.get(0).type);
+ assertEquals(0, uut.vertices.get("v1").parentConnections.size());
+ assertEquals(1, uut.vertices.get("v2").parentConnections.size());
+ assertEquals("v1",
uut.vertices.get("v2").parentConnections.get(0).from.name);
+ assertEquals("TYPE", uut.vertices.get("v2").parentConnections.get(0).type);
}
@@ -138,18 +138,18 @@ public class TestStage {
uut.extractVertex(object);
- assertEquals(3, uut.vertexs.size());
- assertTrue(uut.vertexs.containsKey("v1"));
- assertTrue(uut.vertexs.containsKey("v2"));
- assertTrue(uut.vertexs.containsKey("v3"));
-
- assertEquals(2, uut.vertexs.get("v1").parentConnections.size());
- assertEquals(1, uut.vertexs.get("v2").children.size());
- assertEquals(1, uut.vertexs.get("v3").children.size());
- assertEquals("v1", uut.vertexs.get("v2").children.get(0).name);
- assertEquals("v1", uut.vertexs.get("v3").children.get(0).name);
- assertEquals("TYPE1", uut.vertexs.get("v1").parentConnections.get(0).type);
- assertEquals("TYPE2", uut.vertexs.get("v1").parentConnections.get(1).type);
+ assertEquals(3, uut.vertices.size());
+ assertTrue(uut.vertices.containsKey("v1"));
+ assertTrue(uut.vertices.containsKey("v2"));
+ assertTrue(uut.vertices.containsKey("v3"));
+
+ assertEquals(2, uut.vertices.get("v1").parentConnections.size());
+ assertEquals(1, uut.vertices.get("v2").children.size());
+ assertEquals(1, uut.vertices.get("v3").children.size());
+ assertEquals("v1", uut.vertices.get("v2").children.get(0).name);
+ assertEquals("v1", uut.vertices.get("v3").children.get(0).name);
+ assertEquals("TYPE1",
uut.vertices.get("v1").parentConnections.get(0).type);
+ assertEquals("TYPE2",
uut.vertices.get("v1").parentConnections.get(1).type);
}
diff --git a/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestVertex.java b/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestVertex.java
index 6435edc..38250dd 100644
--- a/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestVertex.java
+++ b/common/src/test/org/apache/hadoop/hive/common/jsonexplain/TestVertex.java
@@ -42,7 +42,7 @@ public class TestVertex {
Vertex uut = new Vertex("name", object, null, tezJsonParser);
uut.extractOpTree();
- assertEquals(2, uut.mergeJoinDummyVertexs.size());
+ assertEquals(2, uut.mergeJoinDummyVertices.size());
}
@Test
diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java b/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java
index fcbda1e..dd0ec22 100644
--- a/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java
+++ b/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java
@@ -629,13 +629,13 @@ public class TestDecimal128 {
final short SCALE = 33;
Decimal128 current = new Decimal128(1, SCALE);
Decimal128 multiplier = new Decimal128();
- Decimal128 dividor = new Decimal128();
+ Decimal128 divisor = new Decimal128();
Decimal128 one = new Decimal128(1);
for (int i = LOOPS; i > 0; --i) {
multiplier.update(i, SCALE);
current.multiplyDestructive(multiplier, SCALE);
- dividor.update(1 + 2 * i, SCALE);
- current.divideDestructive(dividor, SCALE);
+ divisor.update(1 + 2 * i, SCALE);
+ current.divideDestructive(divisor, SCALE);
current.addDestructive(one, SCALE);
}
current.multiplyDestructive(new Decimal128(2), SCALE);
@@ -654,17 +654,17 @@ public class TestDecimal128 {
final short SCALE = 30;
Decimal128 total = new Decimal128(0);
Decimal128 multiplier = new Decimal128();
- Decimal128 dividor = new Decimal128();
+ Decimal128 divisor = new Decimal128();
Decimal128 current = new Decimal128();
for (int i = 0; i < LOOPS; ++i) {
current.update(3, SCALE);
- dividor.update(2 * i + 1, SCALE);
- current.divideDestructive(dividor, SCALE);
+ divisor.update(2 * i + 1, SCALE);
+ current.divideDestructive(divisor, SCALE);
for (int j = 1; j <= i; ++j) {
multiplier.update(i + j, SCALE);
- dividor.update(16 * j, SCALE);
+ divisor.update(16 * j, SCALE);
current.multiplyDestructive(multiplier, SCALE);
- current.divideDestructive(dividor, SCALE);
+ current.divideDestructive(divisor, SCALE);
}
total.addDestructive(current, SCALE);
diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java
index fd41c5b..1d0beaf 100644
--- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java
+++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java
@@ -61,7 +61,7 @@ public class TestHiveConfRestrictList {
}
/**
- * Test that restrict list config itselft can't be changed
+ * Test that restrict list config itself can't be changed
* @throws Exception
*/
@Test
diff --git a/common/src/test/org/apache/hive/common/util/TestHiveStringUtils.java b/common/src/test/org/apache/hive/common/util/TestHiveStringUtils.java
index 22d5bc4..e204a04 100644
--- a/common/src/test/org/apache/hive/common/util/TestHiveStringUtils.java
+++ b/common/src/test/org/apache/hive/common/util/TestHiveStringUtils.java
@@ -109,7 +109,7 @@ public class TestHiveStringUtils {
* check that statement is unchanged after stripping
*/
private void assertUnchanged(String statement) {
- assertEquals("statement should not have been affected by stripping
commnents", statement,
+ assertEquals("statement should not have been affected by stripping
comments", statement,
removeComments(statement));
}
}
diff --git a/ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out b/ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out
index 129577c..938c6ac 100644
--- a/ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out
+++ b/ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@tmptable
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
POSTHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
@@ -26,13 +26,13 @@ Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed,
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
#### A masked pattern was here ####
], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
#### A masked pattern was here ####
]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE]
[Masked Vertex killed due to OTHER_VERTEX_FAILURE]
@@ -41,12 +41,12 @@ FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.T
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
#### A masked pattern was here ####
], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
#### A masked pattern was here ####
]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE][Masked Vertex killed due to OTHER_VERTEX_FAILURE]DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:1
diff --git a/ql/src/test/results/clientnegative/stats_publisher_error_1.q.out b/ql/src/test/results/clientnegative/stats_publisher_error_1.q.out
index 863d689..0597add 100644
--- a/ql/src/test/results/clientnegative/stats_publisher_error_1.q.out
+++ b/ql/src/test/results/clientnegative/stats_publisher_error_1.q.out
@@ -25,13 +25,13 @@ Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed,
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30002]: StatsPublisher cannot be connected to.There was a error while connecting to the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30002]: StatsPublisher cannot be connected to.There was a error while connecting to the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
#### A masked pattern was here ####
], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30002]: StatsPublisher cannot be connected to.There was a error while connecting to the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30002]: StatsPublisher cannot be connected to.There was a error while connecting to the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
#### A masked pattern was here ####
]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE]
[Masked Vertex killed due to OTHER_VERTEX_FAILURE]
@@ -40,12 +40,12 @@ FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.T
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30002]: StatsPublisher cannot be connected to.There was a error while connecting to the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30002]: StatsPublisher cannot be connected to.There was a error while connecting to the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
#### A masked pattern was here ####
], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30002]: StatsPublisher cannot be connected to.There was a error while connecting to the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30002]: StatsPublisher cannot be connected to.There was a error while connecting to the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
#### A masked pattern was here ####
]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE][Masked Vertex killed due to OTHER_VERTEX_FAILURE]DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:1
diff --git a/ql/src/test/results/clientnegative/stats_publisher_error_2.q.out b/ql/src/test/results/clientnegative/stats_publisher_error_2.q.out
index 129577c..938c6ac 100644
--- a/ql/src/test/results/clientnegative/stats_publisher_error_2.q.out
+++ b/ql/src/test/results/clientnegative/stats_publisher_error_2.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@tmptable
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you don't want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false
POSTHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
@@ -26,13 +26,13 @@ Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#,
diagnostics=[Task failed,
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing
operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]:
StatsPublisher cannot be obtained. There was a error to retrieve the
StatsPublisher, and retrying might help. If you dont want the query to fail
because accurate statistics could not be collected, set
hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]:
StatsPublisher cannot be obtained. There was a error to retrieve the
StatsPublisher, and retrying might help. If you don't want the query to fail
because accurate statistics could not be collected, set
hive.stats.reliable=false
#### A masked pattern was here ####
], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) :
attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive
Runtime Error while closing operators
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing
operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]:
StatsPublisher cannot be obtained. There was a error to retrieve the
StatsPublisher, and retrying might help. If you dont want the query to fail
because accurate statistics could not be collected, set
hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]:
StatsPublisher cannot be obtained. There was a error to retrieve the
StatsPublisher, and retrying might help. If you don't want the query to fail
because accurate statistics could not be collected, set
hive.stats.reliable=false
#### A masked pattern was here ####
]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1
killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE]
[Masked Vertex killed due to OTHER_VERTEX_FAILURE]
@@ -41,12 +41,12 @@ FAILED: Execution Error, return code 2 from
org.apache.hadoop.hive.ql.exec.tez.T
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing
operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]:
StatsPublisher cannot be obtained. There was a error to retrieve the
StatsPublisher, and retrying might help. If you dont want the query to fail
because accurate statistics could not be collected, set
hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]:
StatsPublisher cannot be obtained. There was a error to retrieve the
StatsPublisher, and retrying might help. If you don't want the query to fail
because accurate statistics could not be collected, set
hive.stats.reliable=false
#### A masked pattern was here ####
], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) :
attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive
Runtime Error while closing operators
#### A masked pattern was here ####
Caused by: java.lang.RuntimeException: Hive Runtime Error while closing
operators
#### A masked pattern was here ####
-Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]:
StatsPublisher cannot be obtained. There was a error to retrieve the
StatsPublisher, and retrying might help. If you dont want the query to fail
because accurate statistics could not be collected, set
hive.stats.reliable=false
+Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]:
StatsPublisher cannot be obtained. There was a error to retrieve the
StatsPublisher, and retrying might help. If you don't want the query to fail
because accurate statistics could not be collected, set
hive.stats.reliable=false
#### A masked pattern was here ####
]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1
killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due
to:OWN_TASK_FAILURE][Masked Vertex killed due to OTHER_VERTEX_FAILURE]DAG did
not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:1
diff --git
a/ql/src/test/results/clientpositive/llap/stats_aggregator_error_1.q.out
b/ql/src/test/results/clientpositive/llap/stats_aggregator_error_1.q.out
index 2195922..9c64b38 100644
--- a/ql/src/test/results/clientpositive/llap/stats_aggregator_error_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/stats_aggregator_error_1.q.out
@@ -68,7 +68,7 @@ PREHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select *
from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@tmptable_n6
-[Error 30017]: Skipping stats aggregation by error
org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher
cannot be obtained. There was a error to retrieve the StatsPublisher, and
retrying might help. If you dont want the query to fail because accurate
statistics could not be collected, set hive.stats.reliable=false
+[Error 30017]: Skipping stats aggregation by error
org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher
cannot be obtained. There was a error to retrieve the StatsPublisher, and
retrying might help. If you don't want the query to fail because accurate
statistics could not be collected, set hive.stats.reliable=false
POSTHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
diff --git
a/ql/src/test/results/clientpositive/llap/stats_publisher_error_1.q.out
b/ql/src/test/results/clientpositive/llap/stats_publisher_error_1.q.out
index 6fe20fd..7aeb1b2 100644
--- a/ql/src/test/results/clientpositive/llap/stats_publisher_error_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/stats_publisher_error_1.q.out
@@ -67,7 +67,7 @@ PREHOOK: query: INSERT OVERWRITE TABLE tmptable select * from
src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@tmptable
-[Error 30017]: Skipping stats aggregation by error
org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher
cannot be obtained. There was a error to retrieve the StatsPublisher, and
retrying might help. If you dont want the query to fail because accurate
statistics could not be collected, set hive.stats.reliable=false
+[Error 30017]: Skipping stats aggregation by error
org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher
cannot be obtained. There was a error to retrieve the StatsPublisher, and
retrying might help. If you don't want the query to fail because accurate
statistics could not be collected, set hive.stats.reliable=false
POSTHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
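All of the golden files above surface the same remedy: when hive.stats.reliable
is true, a StatsPublisher failure aborts the query, and setting it to false lets
the query finish with incomplete statistics. A minimal sketch of toggling the
flag, assuming a HiveConf built from the default resources (HiveConf extends
Hadoop's Configuration, so setBoolean/getBoolean apply):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class StatsReliabilityExample {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // With hive.stats.reliable=true, the [Error 30000]/[Error 30002]
        // failures quoted in the .q.out diffs above fail the whole query;
        // false tolerates stats-collection errors instead.
        conf.setBoolean("hive.stats.reliable", false);
        System.out.println("hive.stats.reliable = "
            + conf.getBoolean("hive.stats.reliable", true));
      }
    }

The same switch is available per session as the HiveQL statement
SET hive.stats.reliable=false; which is the form the error message suggests.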
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java
index 99d3e57..84ab2a3 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java
+++
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/metrics/common/Metrics.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hive.common.metrics.common;
/**
- * Generic Metics interface.
+ * Generic Metrics interface.
*/
public interface Metrics {
@@ -112,7 +112,7 @@ public interface Metrics {
MetricsVariable<Integer> denominator);
/**
- * Mark an event occurance for a meter. Meters measure the rate of an event
and track
+ * Mark an event occurrence for a meter. Meters measure the rate of an event
and track
* 1/5/15 minute moving averages
* @param name name of the meter
*/
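The javadoc corrected here documents meter semantics: each marked occurrence
feeds both an event count and 1/5/15-minute moving rate averages. Hive's
metrics2 implementation sits on the Dropwizard (Codahale) library, the
CodahaleReporter touched in the next file, so the behavior can be sketched
directly against that API; the registry and meter name below are illustrative
only:

    import com.codahale.metrics.Meter;
    import com.codahale.metrics.MetricRegistry;

    public class MeterExample {
      public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        Meter requests = registry.meter("requests");  // hypothetical meter name

        // Each mark() records one event occurrence; the meter maintains a
        // total count plus exponentially weighted 1/5/15-minute rates.
        for (int i = 0; i < 100; i++) {
          requests.mark();
        }

        System.out.println("count      = " + requests.getCount());
        System.out.println("1-min rate = " + requests.getOneMinuteRate());
      }
    }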
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/metrics/metrics2/JsonFileMetricsReporter.java
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/metrics/metrics2/JsonFileMetricsReporter.java
index 97a6331..4038d66 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/metrics/metrics2/JsonFileMetricsReporter.java
+++
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/metrics/metrics2/JsonFileMetricsReporter.java
@@ -114,7 +114,7 @@ public class JsonFileMetricsReporter implements
CodahaleReporter, Runnable {
if (!metricsDir.toFile().exists()) {
LOGGER.warn("Metrics directory {} does not exist, creating one",
metricsDir);
try {
- // createDirectories creates all non-existent parent directories
+ // createDirectories creates all nonexistent parent directories
Files.createDirectories(metricsDir, DIR_ATTRS);
} catch (IOException e) {
LOGGER.error("Failed to create directory {}: {}", metricsDir,
e.getMessage());
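The comment being tightened here carries the behavioral note for this code
path: java.nio.file.Files.createDirectories creates every missing parent
directory and does not throw when the full path already exists as a directory,
whereas Files.createDirectory fails if a parent is missing or the directory
exists. A self-contained JDK sketch with a hypothetical directory (the reporter
itself additionally passes DIR_ATTRS, i.e. the FileAttribute overload of the
same method):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class CreateDirectoriesExample {
      public static void main(String[] args) throws IOException {
        // Hypothetical location standing in for the reporter's metrics dir.
        Path metricsDir = Paths.get("/tmp/hive-metrics/reports");

        // Creates /tmp/hive-metrics and /tmp/hive-metrics/reports as needed;
        // calling it again on the existing directory does not throw.
        Files.createDirectories(metricsDir);

        // Files.createDirectory(metricsDir) by contrast would throw
        // NoSuchFileException for a missing parent and
        // FileAlreadyExistsException once the directory exists.
        System.out.println("isDirectory = " + Files.isDirectory(metricsDir));
      }
    }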