Author: rvs
Date: Fri Nov  4 16:08:04 2011
New Revision: 1197622

URL: http://svn.apache.org/viewvc?rev=1197622&view=rev
Log:
HIVE-2468. Make Hive compile against Hadoop 0.23

Modified:
    incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/common/hive/patch
    incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/rpm/hive/SPECS/hive.spec
    incubator/bigtop/branches/hadoop-0.23/bigtop.mk

Modified: incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/common/hive/patch
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/common/hive/patch?rev=1197622&r1=1197621&r2=1197622&view=diff
==============================================================================
--- incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/common/hive/patch (original)
+++ incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/common/hive/patch Fri Nov  4 16:08:04 2011
@@ -1,27 +1,220 @@
-diff --git build.properties build.properties
-index fc80b03..6247e4b 100644
---- build.properties
-+++ build.properties
-@@ -10,10 +10,10 @@ javac.deprecation=off
- javac.args=
- javac.args.warnings=
+Index: shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
+===================================================================
+--- shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java        (revision 1196775)
++++ shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java        (working copy)
+@@ -35,6 +35,7 @@
+ import org.apache.hadoop.hive.io.HiveIOExceptionHandlerChain;
+ import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
+ import org.apache.hadoop.io.Text;
++import org.apache.hadoop.mapred.ClusterStatus;
+ import org.apache.hadoop.mapred.FileInputFormat;
+ import org.apache.hadoop.mapred.InputFormat;
+ import org.apache.hadoop.mapred.InputSplit;
+@@ -51,9 +52,12 @@
+ import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
+ import org.apache.hadoop.mapred.lib.CombineFileSplit;
+ import org.apache.hadoop.mapred.lib.NullOutputFormat;
++import org.apache.hadoop.mapreduce.Job;
++import org.apache.hadoop.mapreduce.TaskAttemptID;
+ import org.apache.hadoop.security.UnixUserGroupInformation;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.tools.HadoopArchives;
++import org.apache.hadoop.util.Progressable;
+ import org.apache.hadoop.util.ToolRunner;
  
--hadoop.version=0.20.1
--hadoop.security.version=0.20.3-CDH3-SNAPSHOT
--hadoop.mirror=http://mirror.facebook.net/facebook/hive-deps
--hadoop.mirror2=http://archive.cloudera.com/hive-deps
-+hadoop.version=0.23.0
-+hadoop.security.version=0.23.0
-+hadoop.mirror=http://people.apache.org/~rvs/
-+hadoop.mirror2=http://people.apache.org/~rvs/
+ /**
+@@ -211,7 +215,7 @@
+     protected RecordReader<K, V> curReader;
+     protected boolean isShrinked;
+     protected long shrinkedLength;
+-    
++
+     public boolean next(K key, V value) throws IOException {
  
- build.dir.hive=${hive.root}/build
- build.dir.hadoop=${build.dir.hive}/hadoopcore
-diff --git shims/src/0.23/java/org/apache/hadoop/fs/ProxyFileSystem.java shims/src/0.23/java/org/apache/hadoop/fs/ProxyFileSystem.java
-new file mode 100644
-index 0000000..218236f
---- /dev/null
-+++ shims/src/0.23/java/org/apache/hadoop/fs/ProxyFileSystem.java
+       while ((curReader == null)
+@@ -287,9 +291,9 @@
+       }
+       initNextRecordReader(null);
+     }
+-    
++
+     /**
+-     * do next and handle exception inside it. 
++     * do next and handle exception inside it.
+      * @param key
+      * @param value
+      * @return
+@@ -504,4 +508,33 @@
+   public String getTokenStrForm(String tokenSignature) throws IOException {
+     throw new UnsupportedOperationException("Tokens are not supported in current hadoop version");
+   }
++
++  @Override
++  public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception {
++    JobTrackerState state;
++    switch (clusterStatus.getJobTrackerState()) {
++    case INITIALIZING:
++      return JobTrackerState.INITIALIZING;
++    case RUNNING:
++      return JobTrackerState.RUNNING;
++    default:
++      String errorMsg = "Unrecognized JobTracker state: " + clusterStatus.getJobTrackerState();
++      throw new Exception(errorMsg);
++    }
++  }
++
++  @Override
++  public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable) {
++    return new org.apache.hadoop.mapreduce.TaskAttemptContext(conf, new TaskAttemptID()) {
++      @Override
++      public void progress() {
++        progressable.progress();
++      }
++    };
++  }
++
++  @Override
++  public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job) {
++    return new org.apache.hadoop.mapreduce.JobContext(job.getConfiguration(), job.getJobID());
++  }
+ }
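
Both 0.20-family shims above implement the same version-neutral methods, so callers never touch the mapred enums directly. A minimal sketch of the intended call pattern, assuming a JobClient is already configured (the client setup is not part of this patch):

    // Sketch: version-specific enum mapping stays behind the shim.
    HadoopShims.JobTrackerState queryState(JobClient jobClient) throws Exception {
      ClusterStatus cs = jobClient.getClusterStatus();
      return ShimLoader.getHadoopShims().getJobTrackerState(cs);
    }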
+Index: shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
+===================================================================
+--- shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java      (revision 1196775)
++++ shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java      (working copy)
+@@ -38,6 +38,7 @@
+ import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.io.WritableComparable;
++import org.apache.hadoop.mapred.ClusterStatus;
+ import org.apache.hadoop.mapred.FileInputFormat;
+ import org.apache.hadoop.mapred.InputFormat;
+ import org.apache.hadoop.mapred.InputSplit;
+@@ -54,11 +55,14 @@
+ import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
+ import org.apache.hadoop.mapred.lib.CombineFileSplit;
+ import org.apache.hadoop.mapred.lib.NullOutputFormat;
++import org.apache.hadoop.mapreduce.Job;
++import org.apache.hadoop.mapreduce.TaskAttemptID;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.security.token.Token;
+ import org.apache.hadoop.security.token.TokenIdentifier;
+ import org.apache.hadoop.security.token.TokenSelector;
+ import org.apache.hadoop.tools.HadoopArchives;
++import org.apache.hadoop.util.Progressable;
+ import org.apache.hadoop.util.ToolRunner;
+ 
+ /**
+@@ -511,4 +515,33 @@
+         tokenSignature == null ? new Text() : new Text(tokenSignature), ugi.getTokens());
+     return token != null ? token.encodeToUrlString() : null;
+   }
++  
++  @Override
++  public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception {
++    JobTrackerState state;
++    switch (clusterStatus.getJobTrackerState()) {
++    case INITIALIZING:
++      return JobTrackerState.INITIALIZING;
++    case RUNNING:
++      return JobTrackerState.RUNNING;
++    default:
++      String errorMsg = "Unrecognized JobTracker state: " + clusterStatus.getJobTrackerState();
++      throw new Exception(errorMsg);
++    }
++  }
++  
++  @Override
++  public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable) {
++    return new org.apache.hadoop.mapreduce.TaskAttemptContext(conf, new TaskAttemptID()) {
++      @Override
++      public void progress() {
++        progressable.progress();
++      }
++    };
++  }
++
++  @Override
++  public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job) {
++    return new org.apache.hadoop.mapreduce.JobContext(job.getConfiguration(), job.getJobID());
++  }
+ }
+Index: shims/src/0.23/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java (revision 0)
+@@ -0,0 +1,61 @@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.fs;
++
++import java.io.*;
++import java.net.URI;
++import java.net.URISyntaxException;
++
++import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.permission.FsPermission;
++import org.apache.hadoop.util.Progressable;
++
++/****************************************************************
++ * A Proxy for LocalFileSystem
++ *
++ * Serves uri's corresponding to 'pfile:///' namespace with using
++ * a LocalFileSystem 
++ *****************************************************************/
++
++public class ProxyLocalFileSystem extends FilterFileSystem {
++
++  protected LocalFileSystem localFs;
++
++  public ProxyLocalFileSystem() {
++    localFs = new LocalFileSystem();
++  }
++
++  public ProxyLocalFileSystem(FileSystem fs) {
++    throw new RuntimeException ("Unsupported Constructor");
++  }
++
++  @Override
++  public void initialize(URI name, Configuration conf) throws IOException {
++    // create a proxy for the local filesystem
++    // the scheme/authority serving as the proxy is derived
++    // from the supplied URI
++
++    String scheme = name.getScheme();
++    String authority = name.getAuthority() != null ? name.getAuthority() : "";
++    String proxyUriString = name + "://" + authority + "/";
++    fs = new ProxyFileSystem(localFs, URI.create(proxyUriString));
++
++    fs.initialize(name, conf);
++  }
++}
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
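ProxyLocalFileSystem exists so Hive's pfile:/// test scheme resolves to an ordinary LocalFileSystem behind a ProxyFileSystem wrapper. A sketch of how the scheme gets exercised, assuming fs.pfile.impl is mapped to this class in the configuration (the path below is illustrative only):

    // Register the proxy scheme, then resolve a pfile:// URI through it.
    Configuration conf = new Configuration();
    conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
    FileSystem fs = FileSystem.get(URI.create("pfile:///tmp/hive-test"), conf);
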
+Index: shims/src/0.23/java/org/apache/hadoop/fs/ProxyFileSystem.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/fs/ProxyFileSystem.java      (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/fs/ProxyFileSystem.java      (revision 0)
 @@ -0,0 +1,273 @@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
@@ -296,12 +489,17 @@ index 0000000..218236f
 +  }
 +}
 +  
-diff --git shims/src/0.23/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java shims/src/0.23/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java
-new file mode 100644
-index 0000000..b28bf4e
---- /dev/null
-+++ shims/src/0.23/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java
-@@ -0,0 +1,61 @@
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/fs/ProxyFileSystem.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
+Index: shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java   (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java   (revision 0)
+@@ -0,0 +1,52 @@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
@@ -320,65 +518,208 @@ index 0000000..b28bf4e
 + * limitations under the License.
 + */
 +
-+package org.apache.hadoop.fs;
++package org.apache.hadoop.hive.thrift;
 +
-+import java.io.*;
-+import java.net.URI;
-+import java.net.URISyntaxException;
++import org.apache.hadoop.io.Text;
++import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 +
-+import org.apache.hadoop.conf.Configuration;
-+import org.apache.hadoop.fs.permission.FsPermission;
-+import org.apache.hadoop.util.Progressable;
++/**
++ * A delegation token identifier that is specific to Hive.
++ */
++public class DelegationTokenIdentifier
++    extends AbstractDelegationTokenIdentifier {
++  public static final Text HIVE_DELEGATION_KIND = new Text("HIVE_DELEGATION_TOKEN");
 +
-+/****************************************************************
-+ * A Proxy for LocalFileSystem
++  /**
++   * Create an empty delegation token identifier for reading into.
++   */
++  public DelegationTokenIdentifier() {
++  }
++
++  /**
++   * Create a new delegation token identifier
++   * @param owner the effective username of the token owner
++   * @param renewer the username of the renewer
++   * @param realUser the real username of the token owner
++   */
++  public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
++    super(owner, renewer, realUser);
++  }
++
++  @Override
++  public Text getKind() {
++    return HIVE_DELEGATION_KIND;
++  }
++
++}
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenIdentifier.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
+Index: shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java        (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java        (revision 0)
+@@ -0,0 +1,87 @@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
 + *
-+ * Serves uri's corresponding to 'pfile:///' namespace with using
-+ * a LocalFileSystem 
-+ *****************************************************************/
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
 +
-+public class ProxyLocalFileSystem extends FilterFileSystem {
++package org.apache.hadoop.hive.thrift;
 +
-+  protected LocalFileSystem localFs;
++import java.io.IOException;
 +
-+  public ProxyLocalFileSystem() {
-+    localFs = new LocalFileSystem();
-+  }
++import org.apache.hadoop.io.Text;
++import org.apache.hadoop.security.UserGroupInformation;
++import org.apache.hadoop.security.token.Token;
++import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 +
-+  public ProxyLocalFileSystem(FileSystem fs) {
-+    throw new RuntimeException ("Unsupported Constructor");
++/**
++ * A Hive specific delegation token secret manager.
++ * The secret manager is responsible for generating and accepting the password
++ * for each token.
++ */
++public class DelegationTokenSecretManager
++    extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
++
++  /**
++   * Create a secret manager
++   * @param delegationKeyUpdateInterval the number of seconds for rolling new
++   *        secret keys.
++   * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
++   *        tokens
++   * @param delegationTokenRenewInterval how often the tokens must be renewed
++   * @param delegationTokenRemoverScanInterval how often the tokens are scanned
++   *        for expired tokens
++   */
++  public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
++                                      long delegationTokenMaxLifetime,
++                                      long delegationTokenRenewInterval,
++                                      long delegationTokenRemoverScanInterval) {
++    super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
++          delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
 +  }
 +
 +  @Override
-+  public void initialize(URI name, Configuration conf) throws IOException {
-+    // create a proxy for the local filesystem
-+    // the scheme/authority serving as the proxy is derived
-+    // from the supplied URI
++  public DelegationTokenIdentifier createIdentifier() {
++    return new DelegationTokenIdentifier();
++  }
 +
-+    String scheme = name.getScheme();
-+    String authority = name.getAuthority() != null ? name.getAuthority() : "";
-+    String proxyUriString = name + "://" + authority + "/";
-+    fs = new ProxyFileSystem(localFs, URI.create(proxyUriString));
++  public synchronized void cancelDelegationToken(String tokenStrForm) throws IOException {
++    Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
++    t.decodeFromUrlString(tokenStrForm);
++    String user = UserGroupInformation.getCurrentUser().getUserName();
++    cancelToken(t, user);
++  }
 +
-+    fs.initialize(name, conf);
++  public synchronized long renewDelegationToken(String tokenStrForm) throws IOException {
++    Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
++    t.decodeFromUrlString(tokenStrForm);
++    String user = UserGroupInformation.getCurrentUser().getUserName();
++    return renewToken(t, user);
++  }
++
++  public synchronized String getDelegationToken(String renewer) throws IOException {
++    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
++    Text owner = new Text(ugi.getUserName());
++    Text realUser = null;
++    if (ugi.getRealUser() != null) {
++      realUser = new Text(ugi.getRealUser().getUserName());
++    }
++    DelegationTokenIdentifier ident =
++      new DelegationTokenIdentifier(owner, new Text(renewer), realUser);
++    Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>(
++        ident, this);
++    return t.encodeToUrlString();
 +  }
 +}
-diff --git shims/src/0.23/java/org/apache/hadoop/hive/shims/EmptyShim.java shims/src/0.23/java/org/apache/hadoop/hive/shims/EmptyShim.java
-new file mode 100644
-index 0000000..0e5715d
---- /dev/null
-+++ shims/src/0.23/java/org/apache/hadoop/hive/shims/EmptyShim.java
++
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
+Index: shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java     (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java     (revision 0)
+@@ -0,0 +1,33 @@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.hive.thrift;
++
++import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
++
++/**
++ * A delegation token that is specialized for Hive
++ */
++
++public class DelegationTokenSelector
++    extends AbstractDelegationTokenSelector<DelegationTokenIdentifier>{
++
++  public DelegationTokenSelector() {
++    super(DelegationTokenIdentifier.HIVE_DELEGATION_KIND);
++  }
++}
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/hive/thrift/DelegationTokenSelector.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
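This selector is what the new Hadoop23Shims.getTokenStrForm (further down) uses to fish the Hive token out of the current user's credentials. Roughly, with a hypothetical tokenSignature value:

    // Pick the Hive delegation token by its service text; selectToken
    // returns null if the UGI carries no matching token.
    String tokenSignature = "hive-metastore";  // hypothetical service name
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    TokenSelector<? extends TokenIdentifier> selector = new DelegationTokenSelector();
    Token<? extends TokenIdentifier> token =
        selector.selectToken(new Text(tokenSignature), ugi.getTokens());
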
+Index: shims/src/0.23/java/org/apache/hadoop/hive/shims/EmptyShim.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/hive/shims/EmptyShim.java    (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/hive/shims/EmptyShim.java    (revision 0)
 @@ -0,0 +1,4 @@
 +package org.apache.hadoop.hive.shims;
 +
 +class EmptyShim {
 +}
-diff --git shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
-new file mode 100644
-index 0000000..3da7e28
---- /dev/null
-+++ shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
-@@ -0,0 +1,501 @@
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/hive/shims/EmptyShim.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
+Index: shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java        (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java        (revision 0)
+@@ -0,0 +1,546 @@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
@@ -415,7 +756,10 @@ index 0000000..3da7e28
 +import org.apache.hadoop.hdfs.MiniDFSCluster;
 +import org.apache.hadoop.hive.io.HiveIOExceptionHandlerChain;
 +import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
++import org.apache.hadoop.hive.shims.HadoopShims.JobTrackerState;
++import org.apache.hadoop.hive.thrift.DelegationTokenSelector;
 +import org.apache.hadoop.io.Text;
++import org.apache.hadoop.mapred.ClusterStatus;
 +import org.apache.hadoop.mapred.FileInputFormat;
 +import org.apache.hadoop.mapred.InputFormat;
 +import org.apache.hadoop.mapred.InputSplit;
@@ -432,14 +776,22 @@ index 0000000..3da7e28
 +import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
 +import org.apache.hadoop.mapred.lib.CombineFileSplit;
 +import org.apache.hadoop.mapred.lib.NullOutputFormat;
++import org.apache.hadoop.mapreduce.Job;
++import org.apache.hadoop.mapreduce.TaskAttemptID;
++import org.apache.hadoop.mapreduce.task.JobContextImpl;
++import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 +import org.apache.hadoop.security.UserGroupInformation;
++import org.apache.hadoop.security.token.Token;
++import org.apache.hadoop.security.token.TokenIdentifier;
++import org.apache.hadoop.security.token.TokenSelector;
 +import org.apache.hadoop.tools.HadoopArchives;
++import org.apache.hadoop.util.Progressable;
 +import org.apache.hadoop.util.ToolRunner;
 +
 +/**
-+ * Implemention of shims against Hadoop 0.20.0.
++ * Implemention of shims against Hadoop 0.23.0.
 + */
-+public class Hadoop20Shims implements HadoopShims {
++public class Hadoop23Shims implements HadoopShims {
 +  public boolean usesJobShell() {
 +    return false;
 +  }
@@ -849,7 +1201,7 @@ index 0000000..3da7e28
 +
 +  public void setNullOutputFormat(JobConf conf) {
 +    conf.setOutputFormat(NullOutputFormat.class);
-+    conf.setOutputCommitter(Hadoop20Shims.NullOutputCommitter.class);
++    conf.setOutputCommitter(Hadoop23Shims.NullOutputCommitter.class);
 +
-+    // option to bypass job setup and cleanup was introduced in hadoop-21 (MAPREDUCE-463)
-+    // but can be backported. So we disable setup/cleanup in all versions >= 0.19
@@ -867,24 +1219,130 @@ index 0000000..3da7e28
 +
 +  @Override
 +  public boolean isSecureShimImpl() {
-+    return false;
++    return true;
 +  }
 +
 +  @Override
 +  public String getShortUserName(UserGroupInformation ugi) {
-+    return ugi.getUserName();
++    return ugi.getShortUserName();
 +  }
 +
 +  @Override
 +  public String getTokenStrForm(String tokenSignature) throws IOException {
-+    throw new UnsupportedOperationException("Tokens are not supported in current hadoop version");
++    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
++    TokenSelector<? extends TokenIdentifier> tokenSelector = new DelegationTokenSelector();
++
++    Token<? extends TokenIdentifier> token = tokenSelector.selectToken(
++        tokenSignature == null ? new Text() : new Text(tokenSignature), ugi.getTokens());
++    return token != null ? token.encodeToUrlString() : null;
++  }
++  
++  @Override
++  public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception {
++    JobTrackerState state;
++    switch (clusterStatus.getJobTrackerStatus()) {
++    case INITIALIZING:
++      return JobTrackerState.INITIALIZING;
++    case RUNNING:
++      return JobTrackerState.RUNNING;
++    default:
++      String errorMsg = "Unrecognized JobTracker state: " + clusterStatus.getJobTrackerStatus();
++      throw new Exception(errorMsg);
++    }
++  }
++  
++  @Override
++  public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable) {
++    return new TaskAttemptContextImpl(conf, new TaskAttemptID()) {
++      @Override
++      public void progress() {
++        progressable.progress();
++      }
++    };
++  }
++
++  @Override
++  public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job) {
++    return new JobContextImpl(job.getConfiguration(), job.getJobID());
 +  }
 +}
-diff --git shims/src/0.23/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java shims/src/0.23/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java
-new file mode 100644
-index 0000000..323ebbb
---- /dev/null
-+++ shims/src/0.23/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
+Index: shims/src/0.23/java/org/apache/hadoop/hive/shims/Jetty23Shims.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/hive/shims/Jetty23Shims.java (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/hive/shims/Jetty23Shims.java (revision 0)
+@@ -0,0 +1,56 @@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.hadoop.hive.shims;
++
++import java.io.IOException;
++
++import org.mortbay.jetty.bio.SocketConnector;
++import org.mortbay.jetty.handler.RequestLogHandler;
++import org.mortbay.jetty.webapp.WebAppContext;
++
++/**
++ * Jetty23Shims.
++ *
++ */
++public class Jetty23Shims implements JettyShims {
++  public Server startServer(String listen, int port) throws IOException {
++    Server s = new Server();
++    s.setupListenerHostPort(listen, port);
++    return s;
++  }
++
++  private static class Server extends org.mortbay.jetty.Server implements JettyShims.Server {
++    public void addWar(String war, String contextPath) {
++      WebAppContext wac = new WebAppContext();
++      wac.setContextPath(contextPath);
++      wac.setWar(war);
++      RequestLogHandler rlh = new RequestLogHandler();
++      rlh.setHandler(wac);
++      this.addHandler(rlh);
++    }
++
++    public void setupListenerHostPort(String listen, int port)
++        throws IOException {
++
++      SocketConnector connector = new SocketConnector();
++      connector.setPort(port);
++      connector.setHost(listen);
++      this.addConnector(connector);
++    }
++  }
++}
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/hive/shims/Jetty23Shims.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
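Jetty23Shims is effectively a copy of Jetty20Shims; only the class name differs so ShimLoader can register it under the "0.23" key. Assumed usage, with placeholder host, port, and war path, and assuming the returned Server handle exposes start() as the underlying org.mortbay.jetty.Server does:

    // Sketch only: start the shimmed Jetty server and deploy a war.
    JettyShims.Server server =
        ShimLoader.getJettyShims().startServer("0.0.0.0", 9999);
    server.addWar("/usr/lib/hive/lib/hive-hwi.war", "/hwi");  // placeholder path
    server.start();
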
+Index: shims/src/0.23/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java
+===================================================================
+--- shims/src/0.23/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java    (revision 0)
++++ shims/src/0.23/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java    (revision 0)
 @@ -0,0 +1,66 @@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
@@ -952,65 +1410,289 @@ index 0000000..323ebbb
 +    return new ContentSummary(summary[0], summary[1], summary[2]);
 +  }
 +}
-diff --git shims/src/0.23/java/org/apache/hadoop/hive/shims/Jetty20Shims.java shims/src/0.23/java/org/apache/hadoop/hive/shims/Jetty20Shims.java
-new file mode 100644
-index 0000000..13c6b31
---- /dev/null
-+++ shims/src/0.23/java/org/apache/hadoop/hive/shims/Jetty20Shims.java
-@@ -0,0 +1,56 @@
-+/**
-+ * Licensed to the Apache Software Foundation (ASF) under one
-+ * or more contributor license agreements.  See the NOTICE file
-+ * distributed with this work for additional information
-+ * regarding copyright ownership.  The ASF licenses this file
-+ * to you under the Apache License, Version 2.0 (the
-+ * "License"); you may not use this file except in compliance
-+ * with the License.  You may obtain a copy of the License at
-+ *
-+ *     http://www.apache.org/licenses/LICENSE-2.0
-+ *
-+ * Unless required by applicable law or agreed to in writing, software
-+ * distributed under the License is distributed on an "AS IS" BASIS,
-+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+ * See the License for the specific language governing permissions and
-+ * limitations under the License.
-+ */
-+package org.apache.hadoop.hive.shims;
+
+Property changes on: shims/src/0.23/java/org/apache/hadoop/hive/shims/HiveHarFileSystem.java
+___________________________________________________________________
+Added: svn:eol-style
+   + native
+
+Index: shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java
+===================================================================
+--- shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java (revision 1196775)
++++ shims/src/common/java/org/apache/hadoop/hive/shims/ShimLoader.java (working copy)
+@@ -40,6 +40,7 @@
+   static {
+     HADOOP_SHIM_CLASSES.put("0.20", "org.apache.hadoop.hive.shims.Hadoop20Shims");
+     HADOOP_SHIM_CLASSES.put("0.20S", "org.apache.hadoop.hive.shims.Hadoop20SShims");
++    HADOOP_SHIM_CLASSES.put("0.23", "org.apache.hadoop.hive.shims.Hadoop23Shims");
+   }
+ 
+   /**
+@@ -52,6 +53,7 @@
+   static {
+     JETTY_SHIM_CLASSES.put("0.20", "org.apache.hadoop.hive.shims.Jetty20Shims");
+     JETTY_SHIM_CLASSES.put("0.20S", "org.apache.hadoop.hive.shims.Jetty20SShims");
++    JETTY_SHIM_CLASSES.put("0.23", "org.apache.hadoop.hive.shims.Jetty23Shims");
+   }
+ 
+   /**
+@@ -122,7 +124,9 @@
+     try {
+       Class.forName("org.apache.hadoop.security.UnixUserGroupInformation");
+     } catch (ClassNotFoundException cnf) {
+-      majorVersion += "S";
++      if ("0.20".equals(majorVersion)) {
++        majorVersion += "S";
++      }
+     }
+     return majorVersion;
+   }
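The ShimLoader hunks do two things: register the 0.23 shim classes, and restrict the UnixUserGroupInformation fallback so that only a 0.20 release gets relabeled "0.20S" (0.23 also lacks that class and would previously have been mislabeled). The loading itself is a plain reflective lookup, roughly:

    // Rough sketch of the lookup this patch extends; the real ShimLoader
    // adds caching and error handling around it.
    String major = getMajorVersion();                   // e.g. "0.23"
    String shimClass = HADOOP_SHIM_CLASSES.get(major);  // registered above
    HadoopShims shims = (HadoopShims) Class.forName(shimClass).newInstance();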
+Index: shims/src/common/java/org/apache/hadoop/hive/shims/HadoopShims.java
+===================================================================
+--- shims/src/common/java/org/apache/hadoop/hive/shims/HadoopShims.java        (revision 1196775)
++++ shims/src/common/java/org/apache/hadoop/hive/shims/HadoopShims.java        (working copy)
+@@ -31,6 +31,7 @@
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.PathFilter;
+ import org.apache.hadoop.io.Text;
++import org.apache.hadoop.mapred.ClusterStatus;
+ import org.apache.hadoop.mapred.InputFormat;
+ import org.apache.hadoop.mapred.InputSplit;
+ import org.apache.hadoop.mapred.JobConf;
+@@ -38,7 +39,11 @@
+ import org.apache.hadoop.mapred.Reporter;
+ import org.apache.hadoop.mapred.RunningJob;
+ import org.apache.hadoop.mapred.TaskCompletionEvent;
++import org.apache.hadoop.mapreduce.Job;
++import org.apache.hadoop.mapreduce.JobContext;
++import org.apache.hadoop.mapreduce.TaskAttemptContext;
+ import org.apache.hadoop.security.UserGroupInformation;
++import org.apache.hadoop.util.Progressable;
+ 
+ /**
+  * In order to be compatible with multiple versions of Hadoop, all parts
+@@ -189,7 +194,23 @@
+    */
+   String getTokenStrForm(String tokenSignature) throws IOException;
+ 
 +
-+import java.io.IOException;
+   /**
++   * Convert the ClusterStatus to its Thrift equivalent: JobTrackerState.
++   * See MAPREDUCE-2455 for why this is a part of the shim.
++   * @param clusterStatus
++   * @return the matching JobTrackerState
++   * @throws Exception if no equivalent JobTrackerState exists
++   */
++  enum JobTrackerState { INITIALIZING, RUNNING };
 +
-+import org.mortbay.jetty.bio.SocketConnector;
-+import org.mortbay.jetty.handler.RequestLogHandler;
-+import org.mortbay.jetty.webapp.WebAppContext;
++  public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception;
 +
-+/**
-+ * Jetty20Shims.
-+ *
-+ */
-+public class Jetty20Shims implements JettyShims {
-+  public Server startServer(String listen, int port) throws IOException {
-+    Server s = new Server();
-+    s.setupListenerHostPort(listen, port);
-+    return s;
-+  }
++  public TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable);
 +
-+  private static class Server extends org.mortbay.jetty.Server implements JettyShims.Server {
-+    public void addWar(String war, String contextPath) {
-+      WebAppContext wac = new WebAppContext();
-+      wac.setContextPath(contextPath);
-+      wac.setWar(war);
-+      RequestLogHandler rlh = new RequestLogHandler();
-+      rlh.setHandler(wac);
-+      this.addHandler(rlh);
-+    }
++  public JobContext newJobContext(Job job);
 +
-+    public void setupListenerHostPort(String listen, int port)
-+        throws IOException {
++  /**
+    * InputSplitShim.
+    *
+    */
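These three interface additions are the crux of the patch: in 0.23, TaskAttemptContext and JobContext became interfaces with separate *Impl classes (the cited MAPREDUCE-2455), so client code can no longer instantiate them directly. The hbase-handler hunks below are all rewritten to the shape sketched here (job and reporter assumed in scope):

    // Caller-side shape: construction goes through the shim.
    TaskAttemptContext tac = ShimLoader.getHadoopShims()
        .newTaskAttemptContext(job.getConfiguration(), reporter);
    JobContext ctx = ShimLoader.getHadoopShims().newJobContext(job);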
+Index: shims/build.xml
+===================================================================
+--- shims/build.xml    (revision 1196775)
++++ shims/build.xml    (working copy)
+@@ -66,7 +66,7 @@
+     </antcall>
+     <antcall target="build_shims" inheritRefs="false" inheritAll="false">
+       <param name="hadoop.version.ant-internal" value="${hadoop.security.version}" />
+-      <param name="hadoop.version.ant-internal.prefix" value="0.20S" />
++      <param name="hadoop.version.ant-internal.prefix" value="${hadoop.security.version.prefix}" />
+     </antcall>
+     <getversionpref property="hadoop.version.ant-internal.prefix" input="${hadoop.version}" />
+     <javac
+@@ -119,7 +119,7 @@
+     <echo message="Project: ${ant.project.name}"/>
+     <antcall target="compile_secure_test" inheritRefs="false" 
inheritAll="false">
+       <param name="hadoop.version.ant-internal" 
value="${hadoop.security.version}" />
+-      <param name="hadoop.version.ant-internal.prefix" value="0.20S" />
++      <param name="hadoop.version.ant-internal.prefix" 
value="${hadoop.security.version.prefix}" />
+     </antcall>
+   </target>
+ </project>
+Index: build.properties
+===================================================================
+--- build.properties   (revision 1196775)
++++ build.properties   (working copy)
+@@ -10,11 +10,18 @@
+ javac.args=
+ javac.args.warnings=
+ 
+-hadoop.version=0.20.1
+-hadoop.security.version=0.20.3-CDH3-SNAPSHOT
+-hadoop.mirror=http://mirror.facebook.net/facebook/hive-deps
+-hadoop.mirror2=http://archive.cloudera.com/hive-deps
++#hadoop.version=0.20.1
++#hadoop.security.version=0.20.3-CDH3-SNAPSHOT
++#hadoop.security.version.prefix=0.20S
++#hadoop.mirror=http://mirror.facebook.net/facebook/hive-deps
++#hadoop.mirror2=http://archive.cloudera.com/hive-deps
+ 
++hadoop.version=0.23.0
++hadoop.security.version=0.23.0
++hadoop.security.version.prefix=0.23
++hadoop.mirror=http://people.apache.org/~rvs/
++hadoop.mirror2=http://people.apache.org/~rvs/
 +
-+      SocketConnector connector = new SocketConnector();
-+      connector.setPort(port);
-+      connector.setHost(listen);
-+      this.addConnector(connector);
-+    }
-+  }
-+}
+ build.dir.hive=${hive.root}/build
+ build.dir.hadoop=${build.dir.hive}/hadoopcore
+ 
+Index: hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java
+===================================================================
+--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java  (revision 1196775)
++++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java  (working copy)
+@@ -54,6 +54,7 @@
+ import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
+ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+ import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
++import org.apache.hadoop.hive.shims.ShimLoader;
+ import org.apache.hadoop.io.Writable;
+ import org.apache.hadoop.mapred.InputFormat;
+ import org.apache.hadoop.mapred.InputSplit;
+@@ -155,15 +156,9 @@
+     setScan(scan);
+ 
+     Job job = new Job(jobConf);
+-    TaskAttemptContext tac =
+-      new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID()) {
++    TaskAttemptContext tac = ShimLoader.getHadoopShims().newTaskAttemptContext(
++        job.getConfiguration(), reporter);
+ 
+-        @Override
+-        public void progress() {
+-          reporter.progress();
+-        }
+-      };
+-
+     final org.apache.hadoop.mapreduce.RecordReader<ImmutableBytesWritable, Result>
+     recordReader = createRecordReader(tableSplit, tac);
+ 
+@@ -354,7 +349,7 @@
+ 
+     return analyzer;
+   }
+-  
++
+   @Override
+   public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
+ 
+@@ -405,7 +400,7 @@
+ 
+     setScan(scan);
+     Job job = new Job(jobConf);
+-    JobContext jobContext = new JobContext(job.getConfiguration(), job.getJobID());
++    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
+     Path [] tablePaths = FileInputFormat.getInputPaths(jobContext);
+ 
+     List<org.apache.hadoop.mapreduce.InputSplit> splits =
+Index: hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableOutputFormat.java
+===================================================================
+--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableOutputFormat.java         (revision 1196775)
++++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableOutputFormat.java         (working copy)
+@@ -33,6 +33,7 @@
+ import org.apache.hadoop.hive.conf.HiveConf;
+ import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
+ import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
++import org.apache.hadoop.hive.shims.ShimLoader;
+ import org.apache.hadoop.io.Writable;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.OutputFormat;
+@@ -104,8 +105,7 @@
+     String hbaseTableName = jc.get(HBaseSerDe.HBASE_TABLE_NAME);
+     jc.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName);
+     Job job = new Job(jc);
+-    JobContext jobContext =
+-      new JobContext(job.getConfiguration(), job.getJobID());
++    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
+ 
+     try {
+       checkOutputSpecs(jobContext);
+Index: hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
+===================================================================
+--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java      (revision 1196775)
++++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java      (working copy)
+@@ -35,6 +35,7 @@
+ import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
+ import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
++import org.apache.hadoop.hive.shims.ShimLoader;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.io.Writable;
+ import org.apache.hadoop.mapred.JobConf;
+@@ -95,12 +96,9 @@
+ 
+     // Create the HFile writer
+     final org.apache.hadoop.mapreduce.TaskAttemptContext tac =
+-      new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID()) {
+-        @Override
+-        public void progress() {
+-          progressable.progress();
+-        }
+-      };
++      ShimLoader.getHadoopShims().newTaskAttemptContext(
++          job.getConfiguration(), progressable);
++
+     final Path outputdir = FileOutputFormat.getOutputPath(tac);
+     final org.apache.hadoop.mapreduce.RecordWriter<
+       ImmutableBytesWritable, KeyValue> fileWriter = getFileWriter(tac);
+Index: service/src/java/org/apache/hadoop/hive/service/HiveServer.java
+===================================================================
+--- service/src/java/org/apache/hadoop/hive/service/HiveServer.java    (revision 1196775)
++++ service/src/java/org/apache/hadoop/hive/service/HiveServer.java    (working copy)
+@@ -49,6 +49,7 @@
+ import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
+ import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+ import org.apache.hadoop.hive.ql.session.SessionState;
++import org.apache.hadoop.hive.shims.ShimLoader;
+ import org.apache.hadoop.mapred.ClusterStatus;
+ import org.apache.hadoop.mapred.JobTracker;
+ import org.apache.thrift.TException;
+@@ -241,22 +242,8 @@
+         drv.init();
+ 
+         ClusterStatus cs = drv.getClusterStatus();
+-        JobTracker.State jbs = cs.getJobTrackerState();
++        JobTrackerState state = JobTrackerState.valueOf(ShimLoader.getHadoopShims().getJobTrackerState(cs).name());
+ 
+-        // Convert the ClusterStatus to its Thrift equivalent: HiveClusterStatus
+-        JobTrackerState state;
+-        switch (jbs) {
+-        case INITIALIZING:
+-          state = JobTrackerState.INITIALIZING;
+-          break;
+-        case RUNNING:
+-          state = JobTrackerState.RUNNING;
+-          break;
+-        default:
+-          String errorMsg = "Unrecognized JobTracker state: " + jbs.toString();
+-          throw new Exception(errorMsg);
+-        }
+-
+         hcs = new HiveClusterStatus(cs.getTaskTrackers(), cs.getMapTasks(), cs
+             .getReduceTasks(), cs.getMaxMapTasks(), cs.getMaxReduceTasks(),
+             state);
+@@ -649,7 +636,7 @@
+       }
+     }
+   }
+-  
++
+   public static void main(String[] args) {
+     try {
+       HiveServerCli cli = new HiveServerCli();
+@@ -686,7 +673,7 @@
+         .protocolFactory(new TBinaryProtocol.Factory())
+         .minWorkerThreads(cli.minWorkerThreads)
+         .maxWorkerThreads(cli.maxWorkerThreads);
+-      
++
+       TServer server = new TThreadPoolServer(sargs);
+ 
+       String msg = "Starting hive server on port " + cli.port
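
Net effect on HiveServer: the version-specific switch over JobTracker.State collapses into one shim call, with the Thrift-side enum recovered by name. The resulting flow, taken from the hunk above with error handling elided:

    ClusterStatus cs = drv.getClusterStatus();
    JobTrackerState state = JobTrackerState.valueOf(
        ShimLoader.getHadoopShims().getJobTrackerState(cs).name());
    hcs = new HiveClusterStatus(cs.getTaskTrackers(), cs.getMapTasks(),
        cs.getReduceTasks(), cs.getMaxMapTasks(), cs.getMaxReduceTasks(), state);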

Modified: incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/rpm/hive/SPECS/hive.spec
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/rpm/hive/SPECS/hive.spec?rev=1197622&r1=1197621&r2=1197622&view=diff
==============================================================================
--- incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/rpm/hive/SPECS/hive.spec (original)
+++ incubator/bigtop/branches/hadoop-0.23/bigtop-packages/src/rpm/hive/SPECS/hive.spec Fri Nov  4 16:08:04 2011
@@ -115,7 +115,7 @@ This optional package hosts a metadata s
 
 
 %prep
-%setup -n apache-hive-6e882d8
+%setup -n apache-hive-f412000
 %patch0 -p0 
 
 %build

Modified: incubator/bigtop/branches/hadoop-0.23/bigtop.mk
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/hadoop-0.23/bigtop.mk?rev=1197622&r1=1197621&r2=1197622&view=diff
==============================================================================
--- incubator/bigtop/branches/hadoop-0.23/bigtop.mk (original)
+++ incubator/bigtop/branches/hadoop-0.23/bigtop.mk Fri Nov  4 16:08:04 2011
@@ -79,7 +79,7 @@ HIVE_TARBALL_DST=hive-$(HIVE_BASE_VERSIO
 #HIVE_TARBALL_SRC=$(HIVE_TARBALL_DST)
 #HIVE_SITE=$(APACHE_MIRROR)/hive/hive-$(HIVE_BASE_VERSION)/
 HIVE_SITE=https://github.com/apache/hive/tarball
-HIVE_TARBALL_SRC=6e882d8
+HIVE_TARBALL_SRC=f412000
 $(eval $(call PACKAGE,hive,HIVE))
 
 # Sqoop

