Author: toffer
Date: Mon Oct 1 03:26:24 2012
New Revision: 1392185
URL: http://svn.apache.org/viewvc?rev=1392185&view=rev
Log:
HCAT-513 Data Store onto HCatalog table fails for dynamic partitioning as the
temporary directory gets deleted by the completed map tasks (amalakar via
toffer)
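
For context: with dynamic partitioning, each map task's FileRecordWriterContainer used to invoke commitJob() on the per-partition base committer as the writer closed, so the first task to finish promoted its output and let the framework remove the shared temporary directory while sibling tasks were still writing into it. The fix defers all job-level commits to FileOutputCommitterContainer.commitJob(). A minimal sketch of the corrected ordering (partitionContexts is a stand-in for HCatalog's contextDiscoveredByPath map):

    import java.io.IOException;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobContext;

    class PerPartitionCommitSketch {
        // Runs once, at job commit, after every task has finished writing.
        static void commitAll(Iterable<JobContext> partitionContexts) throws IOException {
            for (JobContext context : partitionContexts) {
                // Each partition's own base committer promotes its directory;
                // nothing is deleted while map tasks may still be running.
                new JobConf(context.getConfiguration())
                    .getOutputCommitter()
                    .commitJob(context);
            }
        }
    }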
Modified:
incubator/hcatalog/trunk/CHANGES.txt
incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java
Modified: incubator/hcatalog/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/CHANGES.txt?rev=1392185&r1=1392184&r2=1392185&view=diff
==============================================================================
--- incubator/hcatalog/trunk/CHANGES.txt (original)
+++ incubator/hcatalog/trunk/CHANGES.txt Mon Oct 1 03:26:24 2012
@@ -117,6 +117,8 @@ Trunk (unreleased changes)
OPTIMIZATIONS
BUG FIXES
+  HCAT-513 Data Store onto HCatalog table fails for dynamic partitioning as the temporary directory gets deleted by the completed map tasks (amalakar via toffer)
+
   HCAT-497 HCatContext should use the jobconf instead of its own conf (traviscrawford)
   HCAT-494 MultiOutputFormat in 0.23 fails to setAliasConf() correctly. (mithun via toffer)
Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java?rev=1392185&r1=1392184&r2=1392185&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java Mon Oct 1 03:26:24 2012
@@ -207,6 +207,10 @@ class FileOutputCommitterContainer exten
     public void commitJob(JobContext jobContext) throws IOException {
         if (dynamicPartitioningUsed) {
             discoverPartitions(jobContext);
+            // Commit each partition so it gets moved out of the job work dir
+            for (JobContext context : contextDiscoveredByPath.values()) {
+                new JobConf(context.getConfiguration()).getOutputCommitter().commitJob(context);
+            }
         }
         if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
             getBaseOutputCommitter().commitJob(HCatMapRedUtil.createJobContext(jobContext));
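
Taken together with the existing guard, the commit logic now has two mutually exclusive paths. A condensed sketch of the branch structure (error handling, metastore registration, and the rest of the method omitted):

    // Dynamic partitioning: one commit per discovered partition context.
    // Static output: a single base committer owns the whole output tree.
    if (dynamicPartitioningUsed) {
        discoverPartitions(jobContext);
        for (org.apache.hadoop.mapred.JobContext ctx : contextDiscoveredByPath.values()) {
            new JobConf(ctx.getConfiguration()).getOutputCommitter().commitJob(ctx);
        }
    } else if (getBaseOutputCommitter() != null) {
        getBaseOutputCommitter().commitJob(HCatMapRedUtil.createJobContext(jobContext));
    }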
@@ -675,8 +679,13 @@ class FileOutputCommitterContainer exten
                 LinkedHashMap<String, String> fullPartSpec = new LinkedHashMap<String, String>();
                 Warehouse.makeSpecFromName(fullPartSpec, st.getPath());
                 partitionsDiscoveredByPath.put(st.getPath().toString(), fullPartSpec);
-                JobContext currContext = HCatHadoopShims.Instance.get().createJobContext(context.getConfiguration(), context.getJobID());
-                HCatOutputFormat.configureOutputStorageHandler(context, jobInfo, fullPartSpec);
+                JobConf jobConf = (JobConf)context.getConfiguration();
+                JobContext currContext = HCatMapRedUtil.createJobContext(
+                    jobConf,
+                    context.getJobID(),
+                    InternalUtil.createReporter(HCatMapRedUtil.createTaskAttemptContext(jobConf,
+                        HCatHadoopShims.Instance.get().createTaskAttemptID())));
+                HCatOutputFormat.configureOutputStorageHandler(currContext, jobInfo, fullPartSpec);
                 contextDiscoveredByPath.put(st.getPath().toString(), currContext);
             }
         }
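
The point of this second hunk is that the stored per-partition context is now an old-API (org.apache.hadoop.mapred) JobContext carrying a usable reporter, which is what the per-partition commitJob() call above needs. Rewritten with an intermediate variable for readability, and assuming only the helper signatures visible in this diff, the construction is:

    // Sketch; HCatMapRedUtil, InternalUtil, and HCatHadoopShims are the
    // HCatalog helpers used in the diff, with the argument shapes shown there.
    JobConf jobConf = (JobConf) context.getConfiguration();
    Reporter reporter = InternalUtil.createReporter(
        HCatMapRedUtil.createTaskAttemptContext(
            jobConf, HCatHadoopShims.Instance.get().createTaskAttemptID()));
    org.apache.hadoop.mapred.JobContext currContext =
        HCatMapRedUtil.createJobContext(jobConf, context.getJobID(), reporter);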
Modified: incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java
URL: http://svn.apache.org/viewvc/incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java?rev=1392185&r1=1392184&r2=1392185&view=diff
==============================================================================
--- incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java (original)
+++ incubator/hcatalog/trunk/src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java Mon Oct 1 03:26:24 2012
@@ -144,8 +144,6 @@ class FileRecordWriterContainer extends
                     if (baseOutputCommitter.needsTaskCommit(currContext)) {
                         baseOutputCommitter.commitTask(currContext);
                     }
-                    org.apache.hadoop.mapred.JobContext currJobContext = HCatMapRedUtil.createJobContext(currContext);
-                    baseOutputCommitter.commitJob(currJobContext);
                 }
             } else {
                 getBaseRecordWriter().close(reporter);
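
This removal is the other half of the fix: task-level commit (commitTask) still happens when each dynamic-partition writer closes, but commitJob() no longer runs once per map task, where its cleanup could delete the shared temporary directory out from under tasks that were still writing. In sketch form, the close path for each base committer is now:

    // Per-task work stays in the record writer's close():
    if (baseOutputCommitter.needsTaskCommit(currContext)) {
        baseOutputCommitter.commitTask(currContext);  // touches only this task's output
    }
    // Job-level commit is deferred to FileOutputCommitterContainer.commitJob(),
    // which runs exactly once, after all tasks have completed.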