svn commit: r1431251 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

2013-01-10 Thread tomwhite
Author: tomwhite
Date: Thu Jan 10 10:05:53 2013
New Revision: 1431251

URL: http://svn.apache.org/viewvc?rev=1431251&view=rev
Log:
HADOOP-9183. Potential deadlock in ActiveStandbyElector.

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1431251&r1=1431250&r2=1431251&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Thu Jan 
10 10:05:53 2013
@@ -528,6 +528,8 @@ Release 2.0.3-alpha - Unreleased 
 HADOOP-9155. FsPermission should have different default value, 777 for
 directory and 666 for file. (Binglin Chang via atm)
 
+HADOOP-9183. Potential deadlock in ActiveStandbyElector. (tomwhite)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java?rev=1431251&r1=1431250&r2=1431251&view=diff
==============================================================================
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
 Thu Jan 10 10:05:53 2013
@@ -613,7 +613,7 @@ public class ActiveStandbyElector implem
 // Unfortunately, the ZooKeeper constructor connects to ZooKeeper and
 // may trigger the Connected event immediately. So, if we register the
 // watcher after constructing ZooKeeper, we may miss that event. Instead,
-// we construct the watcher first, and have it queue any events it receives
+// we construct the watcher first, and have it block any events it receives
 // before we can set its ZooKeeper reference.
 WatcherWithClientRef watcher = new WatcherWithClientRef();
 ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
@@ -1002,19 +1002,17 @@ public class ActiveStandbyElector implem
 private CountDownLatch hasReceivedEvent = new CountDownLatch(1);
 
 /**
- * If any events arrive before the reference to ZooKeeper is set,
- * they get queued up and later forwarded when the reference is
- * available.
+ * Latch used to wait until the reference to ZooKeeper is set.
  */
-private final List<WatchedEvent> queuedEvents = Lists.newLinkedList();
+private CountDownLatch hasSetZooKeeper = new CountDownLatch(1);
 
 private WatcherWithClientRef() {
 }
 
 private WatcherWithClientRef(ZooKeeper zk) {
-  this.zk = zk;
+  setZooKeeperRef(zk);
 }
-
+
 /**
  * Waits for the next event from ZooKeeper to arrive.
  * 
@@ -1029,9 +1027,7 @@ public class ActiveStandbyElector implem
 if (!hasReceivedEvent.await(connectionTimeoutMs, 
TimeUnit.MILLISECONDS)) {
  LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
  + connectionTimeoutMs + " milliseconds");
-  synchronized (this) {
-zk.close();
-  }
+  zk.close();
   throw KeeperException.create(Code.CONNECTIONLOSS);
 }
   } catch (InterruptedException e) {
@@ -1041,29 +1037,18 @@ public class ActiveStandbyElector implem
   }
 }
 
-private synchronized void setZooKeeperRef(ZooKeeper zk) {
+private void setZooKeeperRef(ZooKeeper zk) {
   Preconditions.checkState(this.zk == null,
  "zk already set -- must be set exactly once");
   this.zk = zk;
-  
-  for (WatchedEvent e : queuedEvents) {
-forwardEvent(e);
-  }
-  queuedEvents.clear();
+  hasSetZooKeeper.countDown();
 }
 
 @Override
-public synchronized void process(WatchedEvent event) {
-  if (zk != null) {
-forwardEvent(event);
-  } else {
-queuedEvents.add(event);
-  }
-}
-
-private void forwardEvent(WatchedEvent event) {
+public void process(WatchedEvent event) {
   hasReceivedEvent.countDown();
   try {
+hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS);
 ActiveStandbyElector.this.processWatchEvent(
 zk, event);
   } catch (Throwable t) {



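The essence of the HADOOP-9183 fix above: with the old synchronized process()/setZooKeeperRef() pair, the ZooKeeper event thread could hold the watcher's monitor while needing the elector's lock, just as a thread already holding the elector's lock tried to enter the watcher -- a lock-ordering deadlock. The patch drops the locks and has process() park on a CountDownLatch until the client reference is published. A minimal sketch of that handshake, using hypothetical stand-in types (Event handling is reduced to Object and a handle() stub, not the real ZooKeeper API):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    // Sketch only: latch-based publication of a reference to an event callback.
    class LatchedWatcher {
      private final CountDownLatch refSet = new CountDownLatch(1);
      private Object client; // set exactly once, published via the latch

      void setClientRef(Object c) {
        this.client = c;
        refSet.countDown(); // releases any event threads waiting below
      }

      // Called from the event thread; takes no lock, so it cannot
      // deadlock against setClientRef().
      void process(Object event) throws InterruptedException {
        refSet.await(5000, TimeUnit.MILLISECONDS); // bounded wait, as in the patch
        handle(client, event);
      }

      private void handle(Object c, Object event) { /* dispatch */ }
    }

The countDown()/await() pair supplies the same happens-before edge the removed synchronized blocks did, so dropping the locks does not make the reference publication unsafe, and the bounded await keeps the event thread from hanging forever if the reference is never set.
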

svn commit: r1431252 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

2013-01-10 Thread tomwhite
Author: tomwhite
Date: Thu Jan 10 10:09:06 2013
New Revision: 1431252

URL: http://svn.apache.org/viewvc?rev=1431252&view=rev
Log:
Merge -r 1431250:1431251 from trunk to branch-2. Fixes: HADOOP-9183. Potential 
deadlock in ActiveStandbyElector.

Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1431252&r1=1431251&r2=1431252&view=diff
==============================================================================
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
Thu Jan 10 10:09:06 2013
@@ -225,6 +225,8 @@ Release 2.0.3-alpha - Unreleased 
 HADOOP-9155. FsPermission should have different default value, 777 for
 directory and 666 for file. (Binglin Chang via atm)
 
+HADOOP-9183. Potential deadlock in ActiveStandbyElector. (tomwhite)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java?rev=1431252&r1=1431251&r2=1431252&view=diff
==============================================================================
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
 Thu Jan 10 10:09:06 2013
@@ -613,7 +613,7 @@ public class ActiveStandbyElector implem
 // Unfortunately, the ZooKeeper constructor connects to ZooKeeper and
 // may trigger the Connected event immediately. So, if we register the
 // watcher after constructing ZooKeeper, we may miss that event. Instead,
-// we construct the watcher first, and have it queue any events it receives
+// we construct the watcher first, and have it block any events it receives
 // before we can set its ZooKeeper reference.
 WatcherWithClientRef watcher = new WatcherWithClientRef();
 ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
@@ -1002,19 +1002,17 @@ public class ActiveStandbyElector implem
 private CountDownLatch hasReceivedEvent = new CountDownLatch(1);
 
 /**
- * If any events arrive before the reference to ZooKeeper is set,
- * they get queued up and later forwarded when the reference is
- * available.
+ * Latch used to wait until the reference to ZooKeeper is set.
  */
-private final List<WatchedEvent> queuedEvents = Lists.newLinkedList();
+private CountDownLatch hasSetZooKeeper = new CountDownLatch(1);
 
 private WatcherWithClientRef() {
 }
 
 private WatcherWithClientRef(ZooKeeper zk) {
-  this.zk = zk;
+  setZooKeeperRef(zk);
 }
-
+
 /**
  * Waits for the next event from ZooKeeper to arrive.
  * 
@@ -1029,9 +1027,7 @@ public class ActiveStandbyElector implem
 if (!hasReceivedEvent.await(connectionTimeoutMs, 
TimeUnit.MILLISECONDS)) {
   LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
   + connectionTimeoutMs + " milliseconds");
-  synchronized (this) {
-zk.close();
-  }
+  zk.close();
   throw KeeperException.create(Code.CONNECTIONLOSS);
 }
   } catch (InterruptedException e) {
@@ -1041,29 +1037,18 @@ public class ActiveStandbyElector implem
   }
 }
 
-private synchronized void setZooKeeperRef(ZooKeeper zk) {
+private void setZooKeeperRef(ZooKeeper zk) {
   Preconditions.checkState(this.zk == null,
   "zk already set -- must be set exactly once");
   this.zk = zk;
-  
-  for (WatchedEvent e : queuedEvents) {
-forwardEvent(e);
-  }
-  queuedEvents.clear();
+  hasSetZooKeeper.countDown();
 }
 
 @Override
-public synchronized void process(WatchedEvent event) {
-  if (zk != null) {
-forwardEvent(event);
-  } else {
-queuedEvents.add(event);
-  }
-}
-
-private void forwardEvent(WatchedEvent event) {
+public void process(WatchedEvent event) {
   hasReceivedEvent.countDown();
   try {
+hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS);
 ActiveStandbyElector.this.processWatchEvent(
 zk, event);
   } catch (Throwable t) {




svn commit: r1431506 - in /hadoop/common/branches/branch-0.23: ./ hadoop-project/ hadoop-project/src/site/

2013-01-10 Thread kihwal
Author: kihwal
Date: Thu Jan 10 17:10:07 2013
New Revision: 1431506

URL: http://svn.apache.org/viewvc?rev=1431506&view=rev
Log:
merge -r 1382408:1382409 for HDFS-2757

Modified:
hadoop/common/branches/branch-0.23/   (props changed)
hadoop/common/branches/branch-0.23/hadoop-project/   (props changed)
hadoop/common/branches/branch-0.23/hadoop-project/src/site/   (props 
changed)

Propchange: hadoop/common/branches/branch-0.23/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk:r1382409

Propchange: hadoop/common/branches/branch-0.23/hadoop-project/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-project:r1382409

Propchange: hadoop/common/branches/branch-0.23/hadoop-project/src/site/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-project/src/site:r1382409




svn commit: r1431506 - in /hadoop/common/branches/branch-0.23/hadoop-common-project: ./ hadoop-auth/ hadoop-common/ hadoop-common/CHANGES.txt hadoop-common/src/main/docs/ hadoop-common/src/main/java/

2013-01-10 Thread kihwal
Author: kihwal
Date: Thu Jan 10 17:10:07 2013
New Revision: 1431506

URL: http://svn.apache.org/viewvc?rev=1431506&view=rev
Log:
merge -r 1382408:1382409 for HDFS-2757

Modified:
hadoop/common/branches/branch-0.23/hadoop-common-project/   (props changed)
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-auth/   
(props changed)
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/   
(props changed)

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
   (props changed)

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/docs/
   (props changed)

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/
   (props changed)

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/test/core/
   (props changed)

Propchange: hadoop/common/branches/branch-0.23/hadoop-common-project/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project:r1382409

Propchange: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-auth/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-auth:r1382409

Propchange: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common:r1382409

Propchange: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged 
/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1382409

Propchange: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/docs/
------------------------------------------------------------------------------
  Merged 
/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1382409

Propchange: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged 
/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1382409

Propchange: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
  Merged 
/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1382409




[Hadoop Wiki] Update of Hbase/HBaseVersions by stack

2013-01-10 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The Hbase/HBaseVersions page has been changed by stack:
http://wiki.apache.org/hadoop/Hbase/HBaseVersions?action=diff&rev1=3&rev2=4

  = HBase Versions =
+ 
+ THIS DOC IS DATED.  NEEDS MOVING TO THE REFERENCE GUIDE AND UPDATING -- St.Ack
  
  There are currently the following active branches in HBase:
   * '''0.20''' - the current stable release series, being maintained with 
patches for bug fixes only. This release series does '''not''' support HDFS 
durability - edits may be lost in the case of node failure.


svn commit: r1431739 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/io/compress/GzipCodec.java

2013-01-10 Thread eyang
Author: eyang
Date: Thu Jan 10 23:58:11 2013
New Revision: 1431739

URL: http://svn.apache.org/viewvc?rev=1431739&view=rev
Log:
HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1431739&r1=1431738&r2=1431739&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Thu Jan 
10 23:58:11 2013
@@ -148,6 +148,8 @@ Trunk (Unreleased)
 
   BUG FIXES
 
+HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
+
 HADOOP-9041. FsUrlStreamHandlerFactory could cause an infinite loop in
 FileSystem initialization. (Yanbo Liang and Radim Kolar via llu)
 

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java?rev=1431739&r1=1431738&r2=1431739&view=diff
==============================================================================
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
 Thu Jan 10 23:58:11 2013
@@ -40,14 +40,74 @@ public class GzipCodec extends DefaultCo
   protected static class GzipOutputStream extends CompressorStream {
 
 private static class ResetableGZIPOutputStream extends GZIPOutputStream {
-  
+  private static final int TRAILER_SIZE = 8;
+  public static final String JVMVendor = System.getProperty("java.vendor");
+  public static final String JVMVersion = System.getProperty("java.version");
+  private static final boolean HAS_BROKEN_FINISH =
+  (JVMVendor.contains("IBM") && JVMVersion.contains("1.6.0"));
+
   public ResetableGZIPOutputStream(OutputStream out) throws IOException {
 super(out);
   }
-  
+
   public void resetState() throws IOException {
 def.reset();
   }
+
+  /**
+   * Override this method for HADOOP-8419.
+   * Override because the IBM implementation calls def.end(), which
+   * causes problems when resetting the stream for reuse.
+   *
+   */
+  @Override
+  public void finish() throws IOException {
+if (HAS_BROKEN_FINISH) {
+  if (!def.finished()) {
+def.finish();
+while (!def.finished()) {
+  int i = def.deflate(this.buf, 0, this.buf.length);
+  if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
+writeTrailer(this.buf, i);
+i += TRAILER_SIZE;
+out.write(this.buf, 0, i);
+
+return;
+  }
+  if (i > 0) {
+out.write(this.buf, 0, i);
+  }
+}
+
+byte[] arrayOfByte = new byte[TRAILER_SIZE];
+writeTrailer(arrayOfByte, 0);
+out.write(arrayOfByte);
+  }
+} else {
+  super.finish();
+}
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
+throws IOException {
+writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
+writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeInt(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
+throws IOException {
+writeShort(paramInt1 & 0xffff, paramArrayOfByte, paramInt2);
+writeShort(paramInt1 >> 16 & 0xffff, paramArrayOfByte, paramInt2 + 2);
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeShort(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
+throws IOException {
+paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
+paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
+  }
 }
 
 public GzipOutputStream(OutputStream out) throws IOException {


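A note on the magic number in the patch above: TRAILER_SIZE is 8 because a gzip member ends with an 8-byte trailer defined by RFC 1952 -- the CRC-32 of the uncompressed data followed by the uncompressed length modulo 2^32 (ISIZE), both stored little-endian. The re-implemented writeTrailer()/writeInt()/writeShort() chain emits exactly those bytes. A hedged, self-contained sketch of the same layout (helper names here are illustrative, not from the commit):

    import java.util.zip.CRC32;

    class GzipTrailerSketch {
      // bytes 0-3: CRC-32 of the uncompressed data, little-endian
      // bytes 4-7: uncompressed length mod 2^32 (ISIZE), little-endian
      static byte[] trailer(CRC32 crc, long totalIn) {
        byte[] t = new byte[8];
        putIntLE(t, 0, (int) crc.getValue());
        putIntLE(t, 4, (int) totalIn);
        return t;
      }

      static void putIntLE(byte[] b, int off, int v) {
        b[off]     = (byte) (v & 0xFF);
        b[off + 1] = (byte) (v >> 8 & 0xFF);
        b[off + 2] = (byte) (v >> 16 & 0xFF);
        b[off + 3] = (byte) (v >> 24 & 0xFF);
      }
    }

This also explains the loop's guard above: the trailer is written inline only when the deflate buffer still has TRAILER_SIZE bytes of headroom after the final block; otherwise it goes out through a separate 8-byte array.
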


svn commit: r1431740 - /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java

2013-01-10 Thread eyang
Author: eyang
Date: Fri Jan 11 00:00:13 2013
New Revision: 1431740

URL: http://svn.apache.org/viewvc?rev=1431740&view=rev
Log:
HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)

Added:

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java

Added: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java?rev=1431740&view=auto
==============================================================================
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
 (added)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
 Fri Jan 11 00:00:13 2013
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.RandomDatum;
+import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
+import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
+import org.apache.hadoop.util.ReflectionUtils;
+
+import junit.framework.TestCase;
+
+public class TestCompressionStreamReuse extends TestCase {
+  private static final Log LOG = LogFactory
+  .getLog(TestCompressionStreamReuse.class);
+
+  private Configuration conf = new Configuration();
+  private int count = 1;
+  private int seed = new Random().nextInt();
+
+  public void testBZip2Codec() throws IOException {
+resetStateTest(conf, seed, count,
+"org.apache.hadoop.io.compress.BZip2Codec");
+  }
+
+  public void testGzipCompressStreamReuse() throws IOException {
+resetStateTest(conf, seed, count,
+"org.apache.hadoop.io.compress.GzipCodec");
+  }
+
+  public void testGzipCompressStreamReuseWithParam() throws IOException {
+Configuration conf = new Configuration(this.conf);
+ZlibFactory
+.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
+ZlibFactory.setCompressionStrategy(conf,
+CompressionStrategy.HUFFMAN_ONLY);
+resetStateTest(conf, seed, count,
+"org.apache.hadoop.io.compress.GzipCodec");
+  }
+
+  private static void resetStateTest(Configuration conf, int seed, int count,
+  String codecClass) throws IOException {
+// Create the codec
+CompressionCodec codec = null;
+try {
+  codec = (CompressionCodec) ReflectionUtils.newInstance(conf
+  .getClassByName(codecClass), conf);
+} catch (ClassNotFoundException cnfe) {
+  throw new IOException("Illegal codec!");
+}
+LOG.info("Created a Codec object of type: " + codecClass);
+
+// Generate data
+DataOutputBuffer data = new DataOutputBuffer();
+RandomDatum.Generator generator = new RandomDatum.Generator(seed);
+for (int i = 0; i < count; ++i) {
+  generator.next();
+  RandomDatum key = generator.getKey();
+  RandomDatum value = generator.getValue();
+
+  key.write(data);
+  value.write(data);
+}
+LOG.info("Generated " + count + " records");
+
+// Compress data
+DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
+DataOutputStream deflateOut = new DataOutputStream(
+new BufferedOutputStream(compressedDataBuffer));
+CompressionOutputStream deflateFilter = codec
+.createOutputStream(deflateOut);
+deflateFilter.write(data.getData(), 0, data.getLength());
+deflateFilter.finish();
+
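The added test is truncated above just after the first finish(); the part that matters for HADOOP-8419 is what a reusing caller does next: reset the codec stream and write through it again, which used to fail on IBM JDK 6 because the broken finish() had already ended the Deflater. A hedged sketch of that reuse step (continuing from the variables in the excerpt, not the committed test body):

    // Sketch only: reuse the same CompressionOutputStream after finish(),
    // the pattern HADOOP-8419 makes safe on IBM JDK 6.
    deflateFilter.resetState();                 // rewind codec state
    deflateFilter.write(data.getData(), 0, data.getLength());
    deflateFilter.finish();                     // emit a second compressed stream
    deflateOut.flush();
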

svn commit: r1431743 - in /hadoop/common/branches/branch-1: CHANGES.txt src/core/org/apache/hadoop/io/compress/GzipCodec.java src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java

2013-01-10 Thread eyang
Author: eyang
Date: Fri Jan 11 00:02:12 2013
New Revision: 1431743

URL: http://svn.apache.org/viewvc?rev=1431743&view=rev
Log:
HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)

Added:

hadoop/common/branches/branch-1/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
Modified:
hadoop/common/branches/branch-1/CHANGES.txt

hadoop/common/branches/branch-1/src/core/org/apache/hadoop/io/compress/GzipCodec.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1431743&r1=1431742&r2=1431743&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Fri Jan 11 00:02:12 2013
@@ -167,6 +167,8 @@ Release 1.2.0 - unreleased
 
   BUG FIXES
 
+HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
+
 MAPREDUCE-4904. OTHER_LOCAL_MAPS counter is not correct.
 (Junping Du via llu)
 

Modified: 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/io/compress/GzipCodec.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/core/org/apache/hadoop/io/compress/GzipCodec.java?rev=1431743&r1=1431742&r2=1431743&view=diff
==============================================================================
--- 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/io/compress/GzipCodec.java
 (original)
+++ 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/io/compress/GzipCodec.java
 Fri Jan 11 00:02:12 2013
@@ -39,14 +39,74 @@ public class GzipCodec extends DefaultCo
   protected static class GzipOutputStream extends CompressorStream {
 
 private static class ResetableGZIPOutputStream extends GZIPOutputStream {
-  
+  private static final int TRAILER_SIZE = 8;
+  public static final String JVMVendor = System.getProperty("java.vendor");
+  public static final String JVMVersion = System.getProperty("java.version");
+  private static final boolean HAS_BROKEN_FINISH =
+  (JVMVendor.contains("IBM") && JVMVersion.contains("1.6.0"));
+
   public ResetableGZIPOutputStream(OutputStream out) throws IOException {
 super(out);
   }
-  
+
   public void resetState() throws IOException {
 def.reset();
   }
+
+  /**
+   * Override this method for HADOOP-8419.
+   * Override because the IBM implementation calls def.end(), which
+   * causes problems when resetting the stream for reuse.
+   *
+   */
+  @Override
+  public void finish() throws IOException {
+if (HAS_BROKEN_FINISH) {
+  if (!def.finished()) {
+def.finish();
+while (!def.finished()) {
+  int i = def.deflate(this.buf, 0, this.buf.length);
+  if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
+writeTrailer(this.buf, i);
+i += TRAILER_SIZE;
+out.write(this.buf, 0, i);
+
+return;
+  }
+  if (i > 0) {
+out.write(this.buf, 0, i);
+  }
+}
+
+byte[] arrayOfByte = new byte[TRAILER_SIZE];
+writeTrailer(arrayOfByte, 0);
+out.write(arrayOfByte);
+  }
+} else {
+  super.finish();
+}
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
+throws IOException {
+writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
+writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeInt(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
+throws IOException {
+writeShort(paramInt1 & 0xffff, paramArrayOfByte, paramInt2);
+writeShort(paramInt1 >> 16 & 0xffff, paramArrayOfByte, paramInt2 + 2);
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeShort(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
+throws IOException {
+paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
+paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
+  }
 }
 
 public GzipOutputStream(OutputStream out) throws IOException {

Added: 
hadoop/common/branches/branch-1/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java?rev=1431743&view=auto
==============================================================================
--- 

svn commit: r1431744 - in /hadoop/common/branches/branch-1.0: CHANGES.txt src/core/org/apache/hadoop/io/compress/GzipCodec.java src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java

2013-01-10 Thread eyang
Author: eyang
Date: Fri Jan 11 00:03:16 2013
New Revision: 1431744

URL: http://svn.apache.org/viewvc?rev=1431744&view=rev
Log:
HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)

Added:

hadoop/common/branches/branch-1.0/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
Modified:
hadoop/common/branches/branch-1.0/CHANGES.txt

hadoop/common/branches/branch-1.0/src/core/org/apache/hadoop/io/compress/GzipCodec.java

Modified: hadoop/common/branches/branch-1.0/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.0/CHANGES.txt?rev=1431744&r1=1431743&r2=1431744&view=diff
==============================================================================
--- hadoop/common/branches/branch-1.0/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.0/CHANGES.txt Fri Jan 11 00:03:16 2013
@@ -8,6 +8,8 @@ Release 1.0.5 - unreleased
 
   BUG FIXES
 
+HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
+
 MAPREDUCE-4272. SortedRanges.Range#compareTo is not spec compliant.
 (Yu Gao via llu)
 

Modified: 
hadoop/common/branches/branch-1.0/src/core/org/apache/hadoop/io/compress/GzipCodec.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.0/src/core/org/apache/hadoop/io/compress/GzipCodec.java?rev=1431744&r1=1431743&r2=1431744&view=diff
==============================================================================
--- 
hadoop/common/branches/branch-1.0/src/core/org/apache/hadoop/io/compress/GzipCodec.java
 (original)
+++ 
hadoop/common/branches/branch-1.0/src/core/org/apache/hadoop/io/compress/GzipCodec.java
 Fri Jan 11 00:03:16 2013
@@ -39,14 +39,74 @@ public class GzipCodec extends DefaultCo
   protected static class GzipOutputStream extends CompressorStream {
 
 private static class ResetableGZIPOutputStream extends GZIPOutputStream {
-  
+  private static final int TRAILER_SIZE = 8;
+  public static final String JVMVendor = System.getProperty("java.vendor");
+  public static final String JVMVersion = System.getProperty("java.version");
+  private static final boolean HAS_BROKEN_FINISH =
+  (JVMVendor.contains("IBM") && JVMVersion.contains("1.6.0"));
+
   public ResetableGZIPOutputStream(OutputStream out) throws IOException {
 super(out);
   }
-  
+
   public void resetState() throws IOException {
 def.reset();
   }
+
+  /**
+   * Override this method for HADOOP-8419.
+   * Override because the IBM implementation calls def.end(), which
+   * causes problems when resetting the stream for reuse.
+   *
+   */
+  @Override
+  public void finish() throws IOException {
+if (HAS_BROKEN_FINISH) {
+  if (!def.finished()) {
+def.finish();
+while (!def.finished()) {
+  int i = def.deflate(this.buf, 0, this.buf.length);
+  if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
+writeTrailer(this.buf, i);
+i += TRAILER_SIZE;
+out.write(this.buf, 0, i);
+
+return;
+  }
+  if (i > 0) {
+out.write(this.buf, 0, i);
+  }
+}
+
+byte[] arrayOfByte = new byte[TRAILER_SIZE];
+writeTrailer(arrayOfByte, 0);
+out.write(arrayOfByte);
+  }
+} else {
+  super.finish();
+}
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
+throws IOException {
+writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
+writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeInt(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
+throws IOException {
+writeShort(paramInt1 & 0xffff, paramArrayOfByte, paramInt2);
+writeShort(paramInt1 >> 16 & 0xffff, paramArrayOfByte, paramInt2 + 2);
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeShort(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
+throws IOException {
+paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
+paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
+  }
 }
 
 public GzipOutputStream(OutputStream out) throws IOException {

Added: 
hadoop/common/branches/branch-1.0/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.0/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java?rev=1431744&view=auto
==============================================================================
--- 

svn commit: r1431751 - in /hadoop/common/branches/branch-1.1: CHANGES.txt src/core/org/apache/hadoop/io/compress/GzipCodec.java src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java

2013-01-10 Thread eyang
Author: eyang
Date: Fri Jan 11 00:07:40 2013
New Revision: 1431751

URL: http://svn.apache.org/viewvc?rev=1431751&view=rev
Log:
HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)

Added:

hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
Modified:
hadoop/common/branches/branch-1.1/CHANGES.txt

hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/io/compress/GzipCodec.java

Modified: hadoop/common/branches/branch-1.1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/CHANGES.txt?rev=1431751&r1=1431750&r2=1431751&view=diff
==============================================================================
--- hadoop/common/branches/branch-1.1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1.1/CHANGES.txt Fri Jan 11 00:07:40 2013
@@ -22,6 +22,8 @@ Release 1.1.2 - 2012.12.07
 
   BUG FIXES
 
+HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
+
 MAPREDUCE-4272. SortedRanges.Range#compareTo is not spec compliant.
 (Yu Gao via llu)
 

Modified: 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/io/compress/GzipCodec.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/io/compress/GzipCodec.java?rev=1431751&r1=1431750&r2=1431751&view=diff
==============================================================================
--- 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/io/compress/GzipCodec.java
 (original)
+++ 
hadoop/common/branches/branch-1.1/src/core/org/apache/hadoop/io/compress/GzipCodec.java
 Fri Jan 11 00:07:40 2013
@@ -39,14 +39,74 @@ public class GzipCodec extends DefaultCo
   protected static class GzipOutputStream extends CompressorStream {
 
 private static class ResetableGZIPOutputStream extends GZIPOutputStream {
-  
+  private static final int TRAILER_SIZE = 8;
+  public static final String JVMVendor = System.getProperty("java.vendor");
+  public static final String JVMVersion = System.getProperty("java.version");
+  private static final boolean HAS_BROKEN_FINISH =
+  (JVMVendor.contains("IBM") && JVMVersion.contains("1.6.0"));
+
   public ResetableGZIPOutputStream(OutputStream out) throws IOException {
 super(out);
   }
-  
+
   public void resetState() throws IOException {
 def.reset();
   }
+
+  /**
+   * Override this method for HADOOP-8419.
+   * Override because the IBM implementation calls def.end(), which
+   * causes problems when resetting the stream for reuse.
+   *
+   */
+  @Override
+  public void finish() throws IOException {
+if (HAS_BROKEN_FINISH) {
+  if (!def.finished()) {
+def.finish();
+while (!def.finished()) {
+  int i = def.deflate(this.buf, 0, this.buf.length);
+  if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
+writeTrailer(this.buf, i);
+i += TRAILER_SIZE;
+out.write(this.buf, 0, i);
+
+return;
+  }
+  if (i > 0) {
+out.write(this.buf, 0, i);
+  }
+}
+
+byte[] arrayOfByte = new byte[TRAILER_SIZE];
+writeTrailer(arrayOfByte, 0);
+out.write(arrayOfByte);
+  }
+} else {
+  super.finish();
+}
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
+throws IOException {
+writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
+writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeInt(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
+throws IOException {
+writeShort(paramInt1 & 0xffff, paramArrayOfByte, paramInt2);
+writeShort(paramInt1 >> 16 & 0xffff, paramArrayOfByte, paramInt2 + 2);
+  }
+
+  /** re-implemented for HADOOP-8419 because the corresponding method in the JDK is not visible */
+  private void writeShort(int paramInt1, byte[] paramArrayOfByte, int 
paramInt2)
+throws IOException {
+paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
+paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
+  }
 }
 
 public GzipOutputStream(OutputStream out) throws IOException {

Added: 
hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java?rev=1431751&view=auto
==============================================================================
--- 

[Hadoop Wiki] Update of Chukwa_How_To_Contribute by EricYang

2013-01-10 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The Chukwa_How_To_Contribute page has been changed by EricYang:
http://wiki.apache.org/hadoop/Chukwa_How_To_Contribute?action=diff&rev1=3&rev2=4

  <<Anchor(Documentation)>>
   Committing Documentation 
  
- Chukwa's official documentation is authored using 
[[http://forrest.apache.org/|Forrest]].  To commit documentation changes you 
must have Forrest installed and the {{{forrest}}} executable on your 
{{{$PATH}}}. Note that the current version (0.8) doesn't work properly with 
Java 6, use Java 5 instead. Documentation is of two types:
+ Chukwa's official documentation is authored using Maven APT format.  To 
commit documentation changes you must have Maven installed and the {{{mvn}}} 
executable on your {{{$PATH}}}. Documentation is of two types:
   1. End-user documentation, versioned with releases; and,
   1. The website.  This is maintained separately in subversion, republished as 
it is changed.
  
- To commit end-user documentation changes to trunk or a branch, ask the user 
to submit only changes made to the *.xml files in {{{src/docs}}}. Apply that 
patch, run {{{ant docs}}} to generate the html, and then commit.  End-user 
documentation is only published to the web when releases are made, as described 
in HowToRelease.
+ To commit end-user documentation changes to trunk or a branch, ask the user 
to submit only changes made to the *.apt files in {{{src/docs}}}. Apply that 
patch, run {{{mvn site}}} to generate the html, and then {{{mvn site-deploy}} 
to publish.  End-user documentation is only published to the web when releases 
are made, as described in HowToRelease.
  
  To commit changes to the website and re-publish them: {{{
  svn co https://svn.apache.org/repos/asf/incubator/chukwa/site
  cd site
- ant
+ mvn site
- firefox publish/index.html # preview the changes
+ firefox target/index.html # preview the changes
  svn stat   # check for new pages
  svn add# add any new pages
  svn commit
+ mvn site-deploy
- ssh people.apache.org
- cd /www/incubator.apache.org/chukwa
- svn up
  }}}
  
- Changes to website (''via svn up'') might take upto an hour to be reflected 
on Chukwa site.
+ Changes to website (''via mvn site-deploy'') might take upto an hour to be 
reflected on Chukwa site.
  
  == Backporting commits to previous branches ==
  


[Hadoop Wiki] Update of Chukwa_How_To_Contribute by EricYang

2013-01-10 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The Chukwa_How_To_Contribute page has been changed by EricYang:
http://wiki.apache.org/hadoop/Chukwa_How_To_Contribute?action=diff&rev1=4&rev2=5

   1. End-user documentation, versioned with releases; and,
   1. The website.  This is maintained separately in subversion, republished as 
it is changed.
  
- To commit end-user documentation changes to trunk or a branch, ask the user 
to submit only changes made to the *.apt files in {{{src/docs}}}. Apply that 
patch, run {{{mvn site}}} to generate the html, and then {{{mvn site-deploy}} 
to publish.  End-user documentation is only published to the web when releases 
are made, as described in HowToRelease.
+ To commit end-user documentation changes to trunk or a branch, ask the user 
to submit only changes made to the *.apt files in {{{src/docs}}}. Apply that 
patch, run {{{mvn site}}} to generate the html, and then {{{mvn site-deploy}} 
to publish.  End-user documentation is only published to the web when releases 
are made, as described in [[Chukwa_How_To_Release]].
  
  To commit changes to the website and re-publish them: {{{
  svn co https://svn.apache.org/repos/asf/incubator/chukwa/site


[Hadoop Wiki] Trivial Update of Chukwa_How_To_Contribute by EricYang

2013-01-10 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The Chukwa_How_To_Contribute page has been changed by EricYang:
http://wiki.apache.org/hadoop/Chukwa_How_To_Contribute?action=diff&rev1=5&rev2=6

   1. End-user documentation, versioned with releases; and,
   1. The website.  This is maintained separately in subversion, republished as 
it is changed.
  
- To commit end-user documentation changes to trunk or a branch, ask the user 
to submit only changes made to the *.apt files in {{{src/docs}}}. Apply that 
patch, run {{{mvn site}}} to generate the html, and then {{{mvn site-deploy}} 
to publish.  End-user documentation is only published to the web when releases 
are made, as described in [[Chukwa_How_To_Release]].
+ To commit end-user documentation changes to trunk or a branch, ask the user 
to submit only changes made to the *.apt files in {{{src/docs}}}. Apply that 
patch, run {{{mvn site}}} to generate the html, and then {{{mvn site-deploy}}} 
to publish.  End-user documentation is only published to the web when releases 
are made, as described in [[Chukwa_How_To_Release]].
  
  To commit changes to the website and re-publish them: {{{
  svn co https://svn.apache.org/repos/asf/incubator/chukwa/site


svn commit: r1431878 - in /hadoop/common/site: common/author/src/documentation/content/xdocs/ common/publish/ hdfs/author/src/documentation/content/xdocs/ hdfs/publish/ main/author/src/documentation/c

2013-01-10 Thread cos
Author: cos
Date: Fri Jan 11 04:46:36 2013
New Revision: 1431878

URL: http://svn.apache.org/viewvc?rev=1431878&view=rev
Log:
Change Committers affiliation for cos

Modified:
hadoop/common/site/common/author/src/documentation/content/xdocs/credits.xml
hadoop/common/site/common/publish/credits.html
hadoop/common/site/common/publish/credits.pdf
hadoop/common/site/hdfs/author/src/documentation/content/xdocs/credits.xml
hadoop/common/site/hdfs/publish/credits.html
hadoop/common/site/hdfs/publish/credits.pdf
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

hadoop/common/site/mapreduce/author/src/documentation/content/xdocs/credits.xml
hadoop/common/site/mapreduce/publish/credits.html
hadoop/common/site/mapreduce/publish/credits.pdf

Modified: 
hadoop/common/site/common/author/src/documentation/content/xdocs/credits.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/common/author/src/documentation/content/xdocs/credits.xml?rev=1431878&r1=1431877&r2=1431878&view=diff
==============================================================================
--- 
hadoop/common/site/common/author/src/documentation/content/xdocs/credits.xml 
(original)
+++ 
hadoop/common/site/common/author/src/documentation/content/xdocs/credits.xml 
Fri Jan 11 04:46:36 2013
@@ -101,7 +101,7 @@
   <tr>
     <td>cos</td>
     <td><a href="http://people.apache.org/~cos">Konstantin Boudnik</a></td>
-    <td>Karmasphere</td>
+    <td>WANdisco</td>
     <td></td>
     <td>-8</td>
   </tr>

Modified: hadoop/common/site/common/publish/credits.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/common/publish/credits.html?rev=1431878&r1=1431877&r2=1431878&view=diff
==============================================================================
--- hadoop/common/site/common/publish/credits.html (original)
+++ hadoop/common/site/common/publish/credits.html Fri Jan 11 04:46:36 2013
@@ -3,7 +3,7 @@
 head
 META http-equiv=Content-Type content=text/html; charset=UTF-8
 meta content=Apache Forrest name=Generator
-meta name=Forrest-version content=0.9
+meta name=Forrest-version content=0.8
 meta name=Forrest-skin-name content=hadoop-pelt
 titleHadoop Common Credits/title
 link type=text/css href=skin/basic.css rel=stylesheet
@@ -214,7 +214,7 @@ document.write("Last Published: " + docu
 </div>
 
 
-<a name="N1000D"></a><a name="Committers"></a>
+<a name="N1000C"></a><a name="Committers"></a>
 <h2 class="h3">Committers</h2>
 <div class="section">
 <p>Hadoop Common active committers include:</p>
@@ -335,7 +335,7 @@ document.write("Last Published: " + docu
 
 <td colspan="1" rowspan="1">cos</td>
 <td colspan="1" rowspan="1"><a href="http://people.apache.org/~cos">Konstantin Boudnik</a></td>
-<td colspan="1" rowspan="1">Karmasphere</td>
+<td colspan="1" rowspan="1">WANdisco</td>
 <td colspan="1" rowspan="1"></td>
 <td colspan="1" rowspan="1">-8</td>
   
@@ -831,16 +831,16 @@ document.write("Last Published: " + docu
 </div>
 
 
-<a name="N10644"></a><a name="Contributors"></a>
+<a name="N10643"></a><a name="Contributors"></a>
 <h2 class="h3">Contributors</h2>
 <div class="section">
 <p>Hadoop Common contributors and their contributions are listed at Apache 
-<a href="https://issues.apache.org/jira/secure/ConfigureReport.jspa?versionId=-2&amp;selectedProjectId=12310240&amp;reportKey=com.sourcelabs.jira.plugin.report.contributions%3Acontributionreport&amp;Next=Next">JIRA</a>.
+<a href="https://issues.apache.org/jira/secure/ConfigureReport.jspa?versionId=-2&selectedProjectId=12310240&reportKey=com.sourcelabs.jira.plugin.report.contributions%3Acontributionreport&Next=Next">JIRA</a>.
 </p>
 </div>
 
 
-<a name="N10652"></a><a name="Emeriti"></a>
+<a name="N10651"></a><a name="Emeriti"></a>
 <h2 class="h3">Emeriti</h2>
 <div class="section">
 <p>Hadoop Common committers who are no longer active include:</p>

Modified: hadoop/common/site/common/publish/credits.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/common/publish/credits.pdf?rev=1431878&r1=1431877&r2=1431878&view=diff
==============================================================================
Binary files - no diff available.

Modified: 
hadoop/common/site/hdfs/author/src/documentation/content/xdocs/credits.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/hdfs/author/src/documentation/content/xdocs/credits.xml?rev=1431878&r1=1431877&r2=1431878&view=diff
==============================================================================
--- hadoop/common/site/hdfs/author/src/documentation/content/xdocs/credits.xml 
(original)
+++ hadoop/common/site/hdfs/author/src/documentation/content/xdocs/credits.xml 
Fri Jan 11 04:46:36 2013
@@ -69,7 +69,7 @@
   <tr>
     <td>cos</td>
     <td><a href="http://people.apache.org/~cos">Konstantin Boudnik</a></td>
-    <td>Karmasphere</td>
+    <td>WANdisco</td>
     <td></td>
     <td>-8</td>
   </tr>

Modified: hadoop/common/site/hdfs/publish/credits.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/hdfs/publish/credits.html?rev=1431878&r1=1431877&r2=1431878&view=diff