Author: namit
Date: Thu Apr 7 16:32:18 2011
New Revision: 1089938
URL: http://svn.apache.org/viewvc?rev=1089938&view=rev
Log:
HIVE-2082 Reduce memory consumption in preparing MapReduce job
(Ning Zhang via namit)
Modified:
hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
hive/trunk/ql/src/test/results/clientpositive/combine2.q.out
hive/trunk/ql/src/test/results/clientpositive/merge3.q.out
hive/trunk/ql/src/test/results/clientpositive/pcr.q.out
hive/trunk/ql/src/test/results/clientpositive/sample10.q.out
hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java
Modified:
hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL:
http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1089938&r1=1089937&r2=1089938&view=diff
==============================================================================
---
hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
(original)
+++
hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
Thu Apr 7 16:32:18 2011
@@ -25,8 +25,8 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Properties;
import java.util.Map.Entry;
+import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -49,9 +49,9 @@ import org.apache.hadoop.hive.serde2.laz
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.util.StringUtils;
@@ -465,6 +465,93 @@ public class MetaStoreUtils {
.getParameters(), table.getDbName(), table.getTableName(),
table.getPartitionKeys());
}
+ /**
+ * Get partition level schema from table level schema.
+ * @param sd
+ * @param tblsd
+ * @param parameters
+ * @param databaseName
+ * @param tableName
+ * @param partitionKeys
+ * @param tblSchema
+ * @return a Properties object holding the partition-level schema
+ */
+ public static Properties getPartSchemaFromTableSchema(
+ org.apache.hadoop.hive.metastore.api.StorageDescriptor sd,
+ org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd,
+ Map<String, String> parameters, String databaseName, String tableName,
+ List<FieldSchema> partitionKeys,
+ Properties tblSchema) {
+
+ // inherit most properties from the table-level schema
+ Properties schema = (Properties) tblSchema.clone();
+
+ // InputFormat
+ String inputFormat = sd.getInputFormat();
+ if (inputFormat == null || inputFormat.length() == 0) {
+ String tblInput =
+
schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT);
+ if (tblInput == null) {
+ inputFormat =
org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName();
+ } else {
+ inputFormat = tblInput;
+ }
+ }
+
schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT,
+ inputFormat);
+
+ // OutputFormat
+ String outputFormat = sd.getOutputFormat();
+ if (outputFormat == null || outputFormat.length() == 0) {
+ String tblOutput =
+
schema.getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT);
+ if (tblOutput == null) {
+ outputFormat =
org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName();
+ } else {
+ outputFormat = tblOutput;
+ }
+ }
+
schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT,
+ outputFormat);
+
+ // Location
+ if (sd.getLocation() != null) {
+
schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION,
+ sd.getLocation());
+ }
+
+ // Bucket count
+
schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT,
+ Integer.toString(sd.getNumBuckets()));
+
+ if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) {
+
schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME,
+ sd.getBucketCols().get(0));
+ }
+
+ if (sd.getSerdeInfo() != null) {
+ for (Map.Entry<String,String> param :
sd.getSerdeInfo().getParameters().entrySet()) {
+ schema.put(param.getKey(), (param.getValue() != null) ?
param.getValue() : "");
+ }
+
+ if (sd.getSerdeInfo().getSerializationLib() != null) {
+
schema.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB,
+ sd.getSerdeInfo().getSerializationLib());
+ }
+ }
+
+ // skipping columns since partition level field schemas are the same as
table level's
+ // skipping partition keys since it is the same as table level partition
keys
+
+ if (parameters != null) {
+ for (Entry<String, String> e : parameters.entrySet()) {
+ schema.setProperty(e.getKey(), e.getValue());
+ }
+ }
+
+ return schema;
+ }
+
public static Properties getSchema(
org.apache.hadoop.hive.metastore.api.StorageDescriptor sd,
org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd,
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1089938&r1=1089937&r2=1089938&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
(original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Thu
Apr 7 16:32:18 2011
@@ -109,8 +109,8 @@ import org.apache.hadoop.hive.ql.plan.Ma
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.stats.StatsFactory;
import org.apache.hadoop.hive.ql.stats.StatsPublisher;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
@@ -125,8 +125,8 @@ import org.apache.hadoop.hive.serde2.typ
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapred.FileOutputFormat;
@@ -692,9 +692,14 @@ public final class Utilities {
return (new PartitionDesc(part));
}
+ public static PartitionDesc getPartitionDescFromTableDesc(TableDesc tblDesc,
Partition part)
+ throws HiveException {
+ return new PartitionDesc(part, tblDesc);
+ }
+
public static void addMapWork(MapredWork mr, Table tbl, String alias,
Operator<?> work) {
mr.addMapWork(tbl.getDataLocation().getPath(), alias, work, new
PartitionDesc(
- getTableDesc(tbl), null));
+ getTableDesc(tbl), (LinkedHashMap<String, String>) null));
}
private static String getOpTreeSkel_helper(Operator<?> op, String indent) {
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java?rev=1089938&r1=1089937&r2=1089938&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java
(original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java Thu
Apr 7 16:32:18 2011
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.ql.metadat
public class ReadEntity implements Serializable {
private static final long serialVersionUID = 1L;
-
+
/**
* The table.
*/
@@ -45,11 +45,14 @@ public class ReadEntity implements Seria
/**
* This is derived from t and p, but we need to serialize this field to make
sure
- * ReadEntity.hashCode() does not need to recursively read into t and p.
+ * ReadEntity.hashCode() does not need to recursively read into t and p.
*/
private String name;
-
+
public String getName() {
+ if (name == null) {
+ name = computeName();
+ }
return name;
}
@@ -78,40 +81,47 @@ public class ReadEntity implements Seria
*/
public ReadEntity() {
}
-
+
/**
* Constructor.
- *
+ *
* @param t
* The Table that the query reads from.
*/
public ReadEntity(Table t) {
this.t = t;
p = null;
- name = computeName();
+ name = null;
}
/**
* Constructor given a partiton.
- *
+ *
* @param p
* The partition that the query reads from.
*/
public ReadEntity(Partition p) {
t = p.getTable();
this.p = p;
- name = computeName();
+ name = null;
}
private String computeName() {
+ StringBuilder sb = new StringBuilder();
if (p != null) {
- return p.getTable().getDbName() + "@" + p.getTable().getTableName() + "@"
- + p.getName();
+ sb.append(p.getTable().getDbName());
+ sb.append('@');
+ sb.append(p.getTable().getTableName());
+ sb.append('@');
+ sb.append(p.getName());
} else {
- return t.getDbName() + "@" + t.getTableName();
+ sb.append(t.getDbName());
+ sb.append('@');
+ sb.append(t.getTableName());
}
+ return sb.toString();
}
-
+
/**
* Enum that tells what time of a read entity this is.
*/
@@ -167,6 +177,9 @@ public class ReadEntity implements Seria
*/
@Override
public String toString() {
+ if (name == null) {
+ name = computeName();
+ }
return name;
}
Modified:
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1089938&r1=1089937&r2=1089938&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
(original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
Thu Apr 7 16:32:18 2011
@@ -258,10 +258,29 @@ public class Partition implements Serial
return deserializer;
}
+ final public Deserializer getDeserializer(Properties props) {
+ if (deserializer == null) {
+ try {
+ deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(),
props);
+ } catch (HiveException e) {
+ throw new RuntimeException(e);
+ } catch (MetaException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ return deserializer;
+ }
+
public Properties getSchema() {
return MetaStoreUtils.getSchema(tPartition, table.getTTable());
}
+ public Properties getSchemaFromTableSchema(Properties tblSchema) {
+ return MetaStoreUtils.getPartSchemaFromTableSchema(tPartition.getSd(),
table.getTTable().getSd(),
+ tPartition.getParameters(), table.getDbName(), table.getTableName(),
table.getPartitionKeys(),
+ tblSchema);
+ }
+
/**
* @param inputFormatClass
*/
Modified:
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java?rev=1089938&r1=1089937&r2=1089938&view=diff
==============================================================================
---
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
(original)
+++
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
Thu Apr 7 16:32:18 2011
@@ -60,16 +60,16 @@ import org.apache.hadoop.hive.ql.parse.R
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.FetchWork;
import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
+import org.apache.hadoop.hive.ql.plan.MapredLocalWork.BucketMapJoinContext;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
-import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc;
-import org.apache.hadoop.hive.ql.plan.MapredLocalWork.BucketMapJoinContext;
/**
* General utility common functions for the Processor to convert operator into
@@ -611,6 +611,8 @@ public final class GenMapRedUtils {
tblDir = paths[0];
tblDesc = Utilities.getTableDesc(part.getTable());
+ } else if (tblDesc == null) {
+ tblDesc = Utilities.getTableDesc(part.getTable());
}
for (Path p : paths) {
@@ -624,7 +626,7 @@ public final class GenMapRedUtils {
partDir.add(p);
try {
- partDesc.add(Utilities.getPartitionDesc(part));
+ partDesc.add(Utilities.getPartitionDescFromTableDesc(tblDesc, part));
} catch (HiveException e) {
LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
throw new SemanticException(e.getMessage(), e);
Modified:
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java?rev=1089938&r1=1089937&r2=1089938&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
(original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
Thu Apr 7 16:32:18 2011
@@ -18,19 +18,19 @@
package org.apache.hadoop.hive.ql.plan;
-import java.io.File;
import java.io.Serializable;
-import java.net.URI;
import java.util.Enumeration;
import java.util.Properties;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.SerDeUtils;
import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.fs.Path;
/**
* PartitionDesc.
@@ -46,7 +46,7 @@ public class PartitionDesc implements Se
private Class<? extends HiveOutputFormat> outputFileFormatClass;
private java.util.Properties properties;
private String serdeClassName;
-
+
private transient String baseFileName;
public void setBaseFileName(String baseFileName) {
@@ -68,6 +68,7 @@ public class PartitionDesc implements Se
final Class<?> outputFormat, final java.util.Properties properties,
final String serdeClassName) {
this.tableDesc = table;
+ this.properties = properties;
this.partSpec = partSpec;
deserializerClass = serdeClass;
this.inputFileFormatClass = inputFileFormatClass;
@@ -75,8 +76,9 @@ public class PartitionDesc implements Se
outputFileFormatClass = HiveFileFormatUtils
.getOutputFormatSubstitute(outputFormat);
}
- this.properties = properties;
- if (properties != null) {
+ if (serdeClassName != null) {
+ this.serdeClassName = serdeClassName;
+ } else if (properties != null) {
this.serdeClassName = properties
.getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB);
}
@@ -85,16 +87,36 @@ public class PartitionDesc implements Se
public PartitionDesc(final org.apache.hadoop.hive.ql.metadata.Partition part)
throws HiveException {
tableDesc = Utilities.getTableDesc(part.getTable());
+ properties = part.getSchema();
partSpec = part.getSpec();
- deserializerClass = part.getDeserializer().getClass();
+ deserializerClass = part.getDeserializer(properties).getClass();
inputFileFormatClass = part.getInputFormatClass();
outputFileFormatClass = part.getOutputFormatClass();
- properties = part.getSchema();
serdeClassName = properties
.getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB);
;
}
+ public PartitionDesc(final org.apache.hadoop.hive.ql.metadata.Partition part,
+ final TableDesc tblDesc) throws HiveException {
+ tableDesc = tblDesc;
+ properties = part.getSchemaFromTableSchema(tblDesc.getProperties()); //
each partition maintains its own (potentially large) Properties object
+ partSpec = part.getSpec();
+ // deserializerClass = part.getDeserializer(properties).getClass();
+ Deserializer deserializer;
+ try {
+ deserializer = SerDeUtils.lookupDeserializer(
+
properties.getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB));
+ } catch (SerDeException e) {
+ throw new HiveException(e);
+ }
+ deserializerClass = deserializer.getClass();
+ inputFileFormatClass = part.getInputFormatClass();
+ outputFileFormatClass = part.getOutputFormatClass();
+ serdeClassName = properties.getProperty(
+ org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB);
+ }
+
@Explain(displayName = "")
public TableDesc getTableDesc() {
return tableDesc;
@@ -239,13 +261,13 @@ public class PartitionDesc implements Se
/**
* Attempt to derive a virtual <code>base file name</code> property from the
* path. If path format is unrecognized, just use the full path.
- *
+ *
* @param path
* URI to the partition file
*/
void deriveBaseFileName(String path) {
PlanUtils.configureTableJobPropertiesForStorageHandler(tableDesc);
-
+
if (path == null) {
return;
}
Modified: hive/trunk/ql/src/test/results/clientpositive/combine2.q.out
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/combine2.q.out?rev=1089938&r1=1089937&r2=1089938&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/combine2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/combine2.q.out Thu Apr 7
16:32:18 2011
@@ -132,7 +132,7 @@ PREHOOK: Input: default@combine2@value=v
PREHOOK: Input: default@combine2@value=val_8
PREHOOK: Input: default@combine2@value=val_9
PREHOOK: Input: default@combine2@value=|
-PREHOOK: Output:
file:/tmp/sdong/hive_2011-02-10_01-40-36_801_7329954255470641875/-mr-10000
+PREHOOK: Output:
file:/tmp/nzhang/hive_2011-04-05_21-03-47_826_7678427953642362240/-mr-10000
POSTHOOK: query: select key, value from combine2 where value is not null order
by key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00
@@ -143,7 +143,7 @@ POSTHOOK: Input: default@combine2@value=
POSTHOOK: Input: default@combine2@value=val_8
POSTHOOK: Input: default@combine2@value=val_9
POSTHOOK: Input: default@combine2@value=|
-POSTHOOK: Output:
file:/tmp/sdong/hive_2011-02-10_01-40-36_801_7329954255470641875/-mr-10000
+POSTHOOK: Output:
file:/tmp/nzhang/hive_2011-04-05_21-03-47_826_7678427953642362240/-mr-10000
POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key
EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION
[(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION
[(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]
@@ -213,16 +213,16 @@ STAGE PLANS:
type: bigint
Needs Tagging: false
Path -> Alias:
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=2010-04-21
09%3A45%3A00 [combine2]
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_0
[combine2]
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_2
[combine2]
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_4
[combine2]
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_5
[combine2]
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_8
[combine2]
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_9
[combine2]
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=|
[combine2]
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=2010-04-21
09%3A45%3A00 [combine2]
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_0
[combine2]
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_2
[combine2]
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_4
[combine2]
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_5
[combine2]
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_8
[combine2]
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_9
[combine2]
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=|
[combine2]
Path -> Partition:
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=2010-04-21
09%3A45%3A00
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=2010-04-21
09%3A45%3A00
Partition
base file name: value=2010-04-21 09%3A45%3A00
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -235,17 +235,17 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=2010-04-21
09%3A45%3A00
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=2010-04-21
09%3A45%3A00
name default.combine2
- numFiles 8
+ numFiles 1
numPartitions 8
- numRows 12
+ numRows 1
partition_columns value
serialization.ddl struct combine2 { string key}
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 26
- transient_lastDdlTime 1297330836
+ totalSize 3
+ transient_lastDdlTime 1302062626
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -256,7 +256,7 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2
name default.combine2
numFiles 8
numPartitions 8
@@ -266,11 +266,11 @@ STAGE PLANS:
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 26
- transient_lastDdlTime 1297330836
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.combine2
name: default.combine2
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_0
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_0
Partition
base file name: value=val_0
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -283,17 +283,17 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_0
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_0
name default.combine2
- numFiles 8
+ numFiles 1
numPartitions 8
- numRows 12
+ numRows 3
partition_columns value
serialization.ddl struct combine2 { string key}
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 26
- transient_lastDdlTime 1297330836
+ totalSize 6
+ transient_lastDdlTime 1302062626
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -304,7 +304,7 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2
name default.combine2
numFiles 8
numPartitions 8
@@ -314,11 +314,11 @@ STAGE PLANS:
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 26
- transient_lastDdlTime 1297330836
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.combine2
name: default.combine2
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_2
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_2
Partition
base file name: value=val_2
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -331,17 +331,17 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_2
name default.combine2
- numFiles 8
+ numFiles 1
numPartitions 8
- numRows 12
+ numRows 1
partition_columns value
serialization.ddl struct combine2 { string key}
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 26
- transient_lastDdlTime 1297330836
+ totalSize 2
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -352,7 +352,7 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2
name default.combine2
numFiles 8
numPartitions 8
@@ -362,11 +362,11 @@ STAGE PLANS:
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 26
- transient_lastDdlTime 1297330836
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.combine2
name: default.combine2
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_4
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_4
Partition
base file name: value=val_4
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -379,17 +379,17 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_4
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_4
name default.combine2
- numFiles 8
+ numFiles 1
numPartitions 8
- numRows 12
+ numRows 1
partition_columns value
serialization.ddl struct combine2 { string key}
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 26
- transient_lastDdlTime 1297330836
+ totalSize 2
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -400,7 +400,7 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2
name default.combine2
numFiles 8
numPartitions 8
@@ -410,11 +410,11 @@ STAGE PLANS:
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 26
- transient_lastDdlTime 1297330836
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.combine2
name: default.combine2
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_5
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_5
Partition
base file name: value=val_5
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -427,17 +427,17 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_5
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_5
name default.combine2
- numFiles 8
+ numFiles 1
numPartitions 8
- numRows 12
+ numRows 3
partition_columns value
serialization.ddl struct combine2 { string key}
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 26
- transient_lastDdlTime 1297330836
+ totalSize 6
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -448,7 +448,7 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2
name default.combine2
numFiles 8
numPartitions 8
@@ -458,11 +458,11 @@ STAGE PLANS:
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 26
- transient_lastDdlTime 1297330836
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.combine2
name: default.combine2
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_8
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_8
Partition
base file name: value=val_8
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -475,17 +475,17 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_8
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_8
name default.combine2
- numFiles 8
+ numFiles 1
numPartitions 8
- numRows 12
+ numRows 1
partition_columns value
serialization.ddl struct combine2 { string key}
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 26
- transient_lastDdlTime 1297330836
+ totalSize 2
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -496,7 +496,7 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2
name default.combine2
numFiles 8
numPartitions 8
@@ -506,11 +506,11 @@ STAGE PLANS:
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 26
- transient_lastDdlTime 1297330836
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.combine2
name: default.combine2
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_9
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_9
Partition
base file name: value=val_9
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -523,17 +523,17 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=val_9
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=val_9
name default.combine2
- numFiles 8
+ numFiles 1
numPartitions 8
- numRows 12
+ numRows 1
partition_columns value
serialization.ddl struct combine2 { string key}
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 26
- transient_lastDdlTime 1297330836
+ totalSize 2
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -544,7 +544,7 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2
name default.combine2
numFiles 8
numPartitions 8
@@ -554,11 +554,11 @@ STAGE PLANS:
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 26
- transient_lastDdlTime 1297330836
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.combine2
name: default.combine2
-
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=|
+
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=|
Partition
base file name: value=|
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -571,17 +571,17 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2/value=|
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2/value=|
name default.combine2
- numFiles 8
+ numFiles 1
numPartitions 8
- numRows 12
+ numRows 1
partition_columns value
serialization.ddl struct combine2 { string key}
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 26
- transient_lastDdlTime 1297330836
+ totalSize 3
+ transient_lastDdlTime 1302062626
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -592,7 +592,7 @@ STAGE PLANS:
columns.types string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location
pfile:/data/users/sdong/www/open-source-hive1/build/ql/test/data/warehouse/combine2
+ location
pfile:/data/users/nzhang/work/3/apache-hive/build/ql/test/data/warehouse/combine2
name default.combine2
numFiles 8
numPartitions 8
@@ -602,7 +602,7 @@ STAGE PLANS:
serialization.format 1
serialization.lib
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 26
- transient_lastDdlTime 1297330836
+ transient_lastDdlTime 1302062627
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.combine2
name: default.combine2
@@ -621,9 +621,9 @@ STAGE PLANS:
File Output Operator
compressed: false
GlobalTableId: 0
- directory:
file:/tmp/sdong/hive_2011-02-10_01-40-41_145_7466410298733488021/-ext-10001
+ directory:
file:/tmp/nzhang/hive_2011-04-05_21-03-51_912_8204422900678602376/-ext-10001
NumFilesPerFileSink: 1
- Stats Publishing Key Prefix:
file:/tmp/sdong/hive_2011-02-10_01-40-41_145_7466410298733488021/-ext-10001/
+ Stats Publishing Key Prefix:
file:/tmp/nzhang/hive_2011-04-05_21-03-51_912_8204422900678602376/-ext-10001/
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -650,7 +650,7 @@ PREHOOK: Input: default@combine2@value=v
PREHOOK: Input: default@combine2@value=val_8
PREHOOK: Input: default@combine2@value=val_9
PREHOOK: Input: default@combine2@value=|
-PREHOOK: Output:
file:/tmp/sdong/hive_2011-02-10_01-40-41_363_7045672378046976890/-mr-10000
+PREHOOK: Output:
file:/tmp/nzhang/hive_2011-04-05_21-03-52_108_2870620728994249070/-mr-10000
POSTHOOK: query: select count(1) from combine2 where value is not null
POSTHOOK: type: QUERY
POSTHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00
@@ -661,7 +661,7 @@ POSTHOOK: Input: default@combine2@value=
POSTHOOK: Input: default@combine2@value=val_8
POSTHOOK: Input: default@combine2@value=val_9
POSTHOOK: Input: default@combine2@value=|
-POSTHOOK: Output:
file:/tmp/sdong/hive_2011-02-10_01-40-41_363_7045672378046976890/-mr-10000
+POSTHOOK: Output:
file:/tmp/nzhang/hive_2011-04-05_21-03-52_108_2870620728994249070/-mr-10000
POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key
EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION
[(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION
[(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]
@@ -764,14 +764,14 @@ PREHOOK: Input: default@srcpart@ds=2008-
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output:
file:/tmp/sdong/hive_2011-02-10_01-40-45_760_3016201461459323584/-mr-10000
+PREHOOK: Output:
file:/tmp/nzhang/hive_2011-04-05_21-03-56_188_7258512604152883399/-mr-10000
POSTHOOK: query: select ds, count(1) from srcpart where ds is not null group
by ds
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output:
file:/tmp/sdong/hive_2011-02-10_01-40-45_760_3016201461459323584/-mr-10000
+POSTHOOK: Output:
file:/tmp/nzhang/hive_2011-04-05_21-03-56_188_7258512604152883399/-mr-10000
POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key
EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION
[(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION
[(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default),
(src)src.FieldSchema(name:key, type:string, comment:default), ]