svn commit: r1483209 [2/2] - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: ./ src/ src/main/java/org/apache/hadoop/conf/ src/main/java/org/apache/hadoop/net/ src/main/java/o

2013-05-16 Thread todd
Added: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java?rev=1483209&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java (added)
+++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java Thu May 16 07:02:57 2013
@@ -0,0 +1,706 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net.unix;
+
+import java.io.File;
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.SocketTimeoutException;
+import java.nio.ByteBuffer;
+import java.nio.channels.AsynchronousCloseException;
+import java.nio.channels.ClosedChannelException;
+import java.util.Arrays;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.unix.DomainSocket.DomainChannel;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Shell;
+
+import com.google.common.io.Files;
+
+public class TestDomainSocket {
+  private static TemporarySocketDirectory sockDir;
+
+  @BeforeClass
+  public static void init() {
+    sockDir = new TemporarySocketDirectory();
+    DomainSocket.disableBindPathValidation();
+  }
+
+  @AfterClass
+  public static void shutdown() throws IOException {
+    sockDir.close();
+  }
+  
+  @Before
+  public void before() {
+    Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+  }
+
+  /**
+   * Test that we can create a socket and close it, even if it hasn't been
+   * opened.
+   *
+   * @throws IOException
+   */
+  @Test(timeout=180000)
+  public void testSocketCreateAndClose() throws IOException {
+    DomainSocket serv = DomainSocket.bindAndListen(
+      new File(sockDir.getDir(), "test_sock_create_and_close").
+        getAbsolutePath());
+    serv.close();
+  }
+
+  /**
+   * Test DomainSocket path setting and getting.
+   *
+   * @throws IOException
+   */
+  @Test(timeout=180000)
+  public void testSocketPathSetGet() throws IOException {
+    Assert.assertEquals("/var/run/hdfs/sock.100",
+        DomainSocket.getEffectivePath("/var/run/hdfs/sock._PORT", 100));
+  }
+
+  /**
+   * Test that we get a read result of -1 on EOF.
+   *
+   * @throws IOException
+   */
+  @Test(timeout=180000)
+  public void testSocketReadEof() throws Exception {
+    final String TEST_PATH = new File(sockDir.getDir(),
+        "testSocketReadEof").getAbsolutePath();
+    final DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
+    ExecutorService exeServ = Executors.newSingleThreadExecutor();
+    Callable<Void> callable = new Callable<Void>() {
+      public Void call(){
+        DomainSocket conn;
+        try {
+          conn = serv.accept();
+        } catch (IOException e) {
+          throw new RuntimeException("unexpected IOException", e);
+        }
+        byte buf[] = new byte[100];
+        for (int i = 0; i < buf.length; i++) {
+          buf[i] = 0;
+        }
+        try {
+          Assert.assertEquals(-1, conn.getInputStream().read());
+        } catch (IOException e) {
+          throw new RuntimeException("unexpected IOException", e);
+        }
+        return null;
+      }
+    };
+    Future<Void> future = exeServ.submit(callable);
+    DomainSocket conn = DomainSocket.connect(serv.getPath());
+    Thread.sleep(50);
+   
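
An aside on the EOF semantics testSocketReadEof exercises: at the POSIX level, a read from a socket whose peer has closed the connection returns 0 bytes, and it is the Java InputStream layer that maps that to -1. A minimal C sketch of the mapping the native side has to perform (illustrative only, not the Hadoop implementation):

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Read one byte from fd, translating POSIX results into the Java
 * InputStream convention: 0..255 is a byte value, -1 means EOF
 * (the peer performed an orderly shutdown), -2 is an I/O error. */
int read_byte_java_style(int fd) {
  unsigned char b;
  ssize_t res = read(fd, &b, 1);
  if (res == 0) {
    return -1;       /* POSIX signals EOF with a 0-byte read */
  }
  if (res < 0) {
    perror("read");  /* would become an IOException in Java */
    return -2;
  }
  return b;
}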

svn commit: r1483209 - /hadoop/common/branches/branch-2/hadoop-project/src/site/site.xml

2013-05-16 Thread todd
Author: todd
Date: Thu May 16 07:02:57 2013
New Revision: 1483209

URL: http://svn.apache.org/r1483209
Log:
HDFS-347. DFS read performance suboptimal when client co-located on nodes with 
data. Contributed by Colin Patrick McCabe.

Modified:
hadoop/common/branches/branch-2/hadoop-project/src/site/site.xml

Modified: hadoop/common/branches/branch-2/hadoop-project/src/site/site.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-project/src/site/site.xml?rev=1483209&r1=1483208&r2=1483209&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-project/src/site/site.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-project/src/site/site.xml Thu May 16 07:02:57 2013
@@ -73,6 +73,8 @@
       <item name="C API libhdfs" href="hadoop-project-dist/hadoop-hdfs/LibHdfs.html"/>
       <item name="WebHDFS REST API" href="hadoop-project-dist/hadoop-hdfs/WebHDFS.html"/>
       <item name="HttpFS Gateway" href="hadoop-hdfs-httpfs/index.html"/>
+      <item name="Short Circuit Local Reads"
+          href="hadoop-project-dist/hadoop-hdfs/ShortCircuitLocalReads.html"/>
     </menu>

     <menu name="MapReduce" inherit="top">




svn commit: r1483513 - /hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

2013-05-16 Thread kihwal
Author: kihwal
Date: Thu May 16 19:12:37 2013
New Revision: 1483513

URL: http://svn.apache.org/r1483513
Log:
HDFS-4477. Secondary namenode may retain old tokens. Contributed by Daryn Sharp.

Modified:

hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

Modified: 
hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java?rev=1483513&r1=1483512&r2=1483513&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java Thu May 16 19:12:37 2013
@@ -27,8 +27,10 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Set;
 
 import javax.crypto.SecretKey;
 
@@ -140,6 +142,10 @@ extends AbstractDelegationTokenIdentifie
     return;
   }
   
+  protected void logExpireToken(TokenIdent ident) throws IOException {
+    return;
+  }
+
   /** 
* Update the current master key 
* This is called once by startThreads before tokenRemoverThread is created, 
@@ -359,15 +365,25 @@ extends AbstractDelegationTokenIdentifie
   }
   
   /** Remove expired delegation tokens from cache */
-  private synchronized void removeExpiredToken() {
+  private void removeExpiredToken() throws IOException {
     long now = System.currentTimeMillis();
-    Iterator<DelegationTokenInformation> i = currentTokens.values().iterator();
-    while (i.hasNext()) {
-      long renewDate = i.next().getRenewDate();
-      if (now > renewDate) {
-        i.remove();
+    Set<TokenIdent> expiredTokens = new HashSet<TokenIdent>();
+    synchronized (this) {
+      Iterator<Map.Entry<TokenIdent, DelegationTokenInformation>> i =
+          currentTokens.entrySet().iterator();
+      while (i.hasNext()) {
+        Map.Entry<TokenIdent, DelegationTokenInformation> entry = i.next();
+        long renewDate = entry.getValue().getRenewDate();
+        if (renewDate < now) {
+          expiredTokens.add(entry.getKey());
+          i.remove();
+        }
       }
     }
+    // don't hold lock on 'this' to avoid edit log updates blocking token ops
+    for (TokenIdent ident : expiredTokens) {
+      logExpireToken(ident);
+    }
   }
 
   public void stopThreads() {
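
The shape of this fix is a general one: mutate the shared map inside the lock, but defer the slow side effect (here logExpireToken, which may write to the namenode edit log) until after the lock is released, so token issue/renew operations are never blocked behind I/O. A minimal sketch of the same collect-then-act pattern in C with a pthread mutex (the token structure and names are illustrative, not from Hadoop):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define MAX_TOKENS 128

struct token { long renew_date; int live; };

static struct token tokens[MAX_TOKENS];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Slow side effect (logging, I/O); must not run under the lock. */
static void log_expire_token(int idx) {
  printf("expired token %d\n", idx);
}

static void remove_expired_tokens(void) {
  int expired[MAX_TOKENS];
  int nexpired = 0;
  long now = (long)time(NULL);

  /* Phase 1: collect and remove under the lock; no I/O here. */
  pthread_mutex_lock(&lock);
  for (int i = 0; i < MAX_TOKENS; i++) {
    if (tokens[i].live && tokens[i].renew_date < now) {
      tokens[i].live = 0;
      expired[nexpired++] = i;
    }
  }
  pthread_mutex_unlock(&lock);

  /* Phase 2: slow work outside the lock, so other threads can
   * keep issuing and renewing tokens in the meantime. */
  for (int i = 0; i < nexpired; i++) {
    log_expire_token(expired[i]);
  }
}

The trade-off is the same as in the patch: a token may be removed from the map slightly before its expiry is logged, which is acceptable because the map, not the log, is what validity checks consult.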




svn commit: r1483612 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c

2013-05-16 Thread atm
Author: atm
Date: Fri May 17 00:06:56 2013
New Revision: 1483612

URL: http://svn.apache.org/r1483612
Log:
HADOOP-9566. Performing direct read using libhdfs sometimes raises SIGPIPE 
(which in turn throws SIGABRT) causing client crashes. Contributed by Colin 
Patrick McCabe.

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1483612&r1=1483611&r2=1483612&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Fri May 17 00:06:56 2013
@@ -726,6 +726,10 @@ Release 2.0.5-beta - UNRELEASED
     HADOOP-9563. Fix incompatibility introduced by HADOOP-9523.
     (Tian Hong Wang via suresh)
 
+    HADOOP-9566. Performing direct read using libhdfs sometimes raises SIGPIPE
+    (which in turn throws SIGABRT) causing client crashes. (Colin Patrick
+    McCabe via atm)
+
 Release 2.0.4-alpha - 2013-04-25 
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c?rev=1483612&r1=1483611&r2=1483612&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c Fri May 17 00:06:56 2013
@@ -16,8 +16,7 @@
  * limitations under the License.
  */
 
-#define _GNU_SOURCE
-
+#include "config.h"
 #include "exception.h"
 #include "org/apache/hadoop/io/nativeio/file_descriptor.h"
 #include "org_apache_hadoop.h"
@@ -31,6 +30,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <sys/ioctl.h> /* for FIONREAD */
 #include <sys/socket.h>
 #include <sys/stat.h>
@@ -47,6 +47,15 @@
 #define DEFAULT_SEND_TIMEOUT 120000
 #define LISTEN_BACKLOG 128
 
+/* In Linux, you can pass the MSG_NOSIGNAL flag to send, sendto, etc. to prevent
+ * those functions from generating SIGPIPE.  See HDFS-4831 for details.
+ */
+#ifdef MSG_NOSIGNAL
+#define PLATFORM_SEND_FLAGS MSG_NOSIGNAL
+#else
+#define PLATFORM_SEND_FLAGS 0
+#endif
+
 /**
  * Can't pass more than this number of file descriptors in a single message.
  */
@@ -176,6 +185,19 @@ static jthrowable setup(JNIEnv *env, int
         "is %zd bytes.", sizeof(addr.sun_path) - 1);
     goto done;
   }
+#ifdef SO_NOSIGPIPE
+  /* On MacOS and some BSDs, SO_NOSIGPIPE will keep send and sendto from causing
+   * EPIPE.  Note: this will NOT help when using write or writev, only with
+   * send, sendto, sendmsg, etc.  See HDFS-4831.
+   */
+  ret = 1;
+  if (setsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, (void *)&ret, sizeof(ret))) {
+    ret = errno;
+    jthr = newSocketException(env, ret,
+        "error setting SO_NOSIGPIPE on socket: error %s", terror(ret));
+    goto done;
+  }
+#endif
   if (doConnect) {
     RETRY_ON_EINTR(ret, connect(fd, 
         (struct sockaddr*)&addr, sizeof(addr)));
@@ -583,7 +605,7 @@ static jthrowable write_fully(JNIEnv *en
   int err, res;
 
   while (amt > 0) {
-    res = write(fd, buf, amt);
+    res = send(fd, buf, amt, PLATFORM_SEND_FLAGS);
     if (res < 0) {
       err = errno;
       if (err == EINTR) {
@@ -685,7 +707,7 @@ jint offset, jint length)
       goto done;
     }
   }
-  RETRY_ON_EINTR(ret, sendmsg(fd, &socketMsg, 0));
+  RETRY_ON_EINTR(ret, sendmsg(fd, &socketMsg, PLATFORM_SEND_FLAGS));
   if (ret < 0) {
     ret = errno;
     jthr = newSocketException(env, ret, "sendmsg(2) error: %s", terror(ret));
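
Taken together, the change routes every outgoing byte through the send family so the no-SIGPIPE behavior can be requested portably: per call via MSG_NOSIGNAL on Linux, per socket via SO_NOSIGPIPE on MacOS/BSD. A self-contained sketch of the same idea outside of JNI (names like make_sigpipe_safe and send_fully are illustrative, not Hadoop's):

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Prefer the per-call flag where it exists (Linux); elsewhere fall
 * back to 0 and rely on SO_NOSIGPIPE set at socket creation. */
#ifdef MSG_NOSIGNAL
#define PLATFORM_SEND_FLAGS MSG_NOSIGNAL
#else
#define PLATFORM_SEND_FLAGS 0
#endif

/* Configure a fresh socket so sends to a dead peer fail with EPIPE
 * instead of raising a process-killing SIGPIPE. Returns 0 or -errno. */
int make_sigpipe_safe(int fd) {
#ifdef SO_NOSIGPIPE
  int one = 1;
  if (setsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(one))) {
    return -errno;
  }
#else
  (void)fd;  /* on Linux, MSG_NOSIGNAL at send time covers us */
#endif
  return 0;
}

/* Send the whole buffer, retrying on EINTR. Returns 0 or -errno. */
int send_fully(int fd, const char *buf, size_t amt) {
  while (amt > 0) {
    ssize_t res = send(fd, buf, amt, PLATFORM_SEND_FLAGS);
    if (res < 0) {
      if (errno == EINTR) {
        continue;        /* interrupted by a signal; retry */
      }
      return -errno;     /* EPIPE arrives here as an error code */
    }
    buf += res;
    amt -= (size_t)res;
  }
  return 0;
}

Note that neither mechanism protects plain write(2) or writev(2), which is exactly why the patch rewrites write_fully to call send(2) instead of write(2).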




svn commit: r1483613 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: CHANGES.txt src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c

2013-05-16 Thread atm
Author: atm
Date: Fri May 17 00:11:11 2013
New Revision: 1483613

URL: http://svn.apache.org/r1483613
Log:
HADOOP-9566. Performing direct read using libhdfs sometimes raises SIGPIPE 
(which in turn throws SIGABRT) causing client crashes. Contributed by Colin 
Patrick McCabe.

Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1483613&r1=1483612&r2=1483613&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt Fri May 17 00:11:11 2013
@@ -185,6 +185,10 @@ Release 2.0.5-beta - UNRELEASED
     HADOOP-9563. Fix incompatibility introduced by HADOOP-9523.
     (Tian Hong Wang via suresh)
 
+    HADOOP-9566. Performing direct read using libhdfs sometimes raises SIGPIPE
+    (which in turn throws SIGABRT) causing client crashes. (Colin Patrick
+    McCabe via atm)
+
 Release 2.0.4-alpha - 2013-04-25 
 
   INCOMPATIBLE CHANGES

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c?rev=1483613&r1=1483612&r2=1483613&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c (original)
+++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c Fri May 17 00:11:11 2013
@@ -16,8 +16,7 @@
  * limitations under the License.
  */
 
-#define _GNU_SOURCE
-
+#include "config.h"
 #include "exception.h"
 #include "org/apache/hadoop/io/nativeio/file_descriptor.h"
 #include "org_apache_hadoop.h"
@@ -31,6 +30,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <sys/ioctl.h> /* for FIONREAD */
 #include <sys/socket.h>
 #include <sys/stat.h>
@@ -47,6 +47,15 @@
 #define DEFAULT_SEND_TIMEOUT 120000
 #define LISTEN_BACKLOG 128
 
+/* In Linux, you can pass the MSG_NOSIGNAL flag to send, sendto, etc. to prevent
+ * those functions from generating SIGPIPE.  See HDFS-4831 for details.
+ */
+#ifdef MSG_NOSIGNAL
+#define PLATFORM_SEND_FLAGS MSG_NOSIGNAL
+#else
+#define PLATFORM_SEND_FLAGS 0
+#endif
+
 /**
  * Can't pass more than this number of file descriptors in a single message.
  */
@@ -176,6 +185,19 @@ static jthrowable setup(JNIEnv *env, int
         "is %zd bytes.", sizeof(addr.sun_path) - 1);
     goto done;
   }
+#ifdef SO_NOSIGPIPE
+  /* On MacOS and some BSDs, SO_NOSIGPIPE will keep send and sendto from causing
+   * EPIPE.  Note: this will NOT help when using write or writev, only with
+   * send, sendto, sendmsg, etc.  See HDFS-4831.
+   */
+  ret = 1;
+  if (setsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, (void *)&ret, sizeof(ret))) {
+    ret = errno;
+    jthr = newSocketException(env, ret,
+        "error setting SO_NOSIGPIPE on socket: error %s", terror(ret));
+    goto done;
+  }
+#endif
   if (doConnect) {
     RETRY_ON_EINTR(ret, connect(fd, 
         (struct sockaddr*)&addr, sizeof(addr)));
@@ -583,7 +605,7 @@ static jthrowable write_fully(JNIEnv *en
   int err, res;
 
   while (amt > 0) {
-    res = write(fd, buf, amt);
+    res = send(fd, buf, amt, PLATFORM_SEND_FLAGS);
     if (res < 0) {
       err = errno;
       if (err == EINTR) {
@@ -685,7 +707,7 @@ jint offset, jint length)
       goto done;
     }
   }
-  RETRY_ON_EINTR(ret, sendmsg(fd, &socketMsg, 0));
+  RETRY_ON_EINTR(ret, sendmsg(fd, &socketMsg, PLATFORM_SEND_FLAGS));
   if (ret < 0) {
     ret = errno;
     jthr = newSocketException(env, ret, "sendmsg(2) error: %s", terror(ret));