[9/9] flink git commit: [FLINK-4813] [test-utils] Make the hadoop-minikdc dependency optional

2017-02-20 Thread sewen
[FLINK-4813] [test-utils] Make the hadoop-minikdc dependency optional

With this change, any project using flink-test-utils which also requires
SecureTestEnvironment must add a dependency on hadoop-minikdc itself, e.g. in
pom.xml:

   ...
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minikdc</artifactId>
       <version>${minikdc.version}</version>
       <scope>compile</scope>
     </dependency>
   ...
   </dependencies>
   ...

   <build>
     <plugins>
       <plugin>
         <groupId>org.apache.felix</groupId>
         <artifactId>maven-bundle-plugin</artifactId>
         <version>3.0.1</version>
         <inherited>true</inherited>
         <extensions>true</extensions>
       </plugin>
   ...

This closes #3322
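
For reference, a minimal sketch of a test class that drives the MiniKDC through
SecureTestEnvironment. The static prepare()/cleanup() methods are the ones
Flink's own secure tests call; treat the exact signatures as assumptions and
check them against the SecureTestEnvironment in your Flink version:

```java
import org.apache.flink.test.util.SecureTestEnvironment;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.rules.TemporaryFolder;

public class MySecureITCase {

	// working directory for the MiniKDC's keytabs and krb5.conf
	@ClassRule
	public static final TemporaryFolder TEMP_FOLDER = new TemporaryFolder();

	@BeforeClass
	public static void startSecureEnvironment() {
		// starts a MiniKDC and populates the secure test configuration
		SecureTestEnvironment.prepare(TEMP_FOLDER);
	}

	@AfterClass
	public static void stopSecureEnvironment() {
		// stops the MiniKDC and clears the populated configuration
		SecureTestEnvironment.cleanup();
	}

	// ... tests running against the secured mini clusters go here ...
}
```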


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2c08f754
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2c08f754
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2c08f754

Branch: refs/heads/release-1.2
Commit: 2c08f7548ad8388ab081f82cd6f6c916c8900ee0
Parents: a082335
Author: Nico Kruber 
Authored: Wed Feb 15 14:24:32 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 02:07:55 2017 +0100

--
 .../flink-test-utils/pom.xml| 23 +
 .../flink/test/util/SecureTestEnvironment.java  | 34 ++--
 pom.xml | 10 --
 3 files changed, 55 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/2c08f754/flink-test-utils-parent/flink-test-utils/pom.xml
--
diff --git a/flink-test-utils-parent/flink-test-utils/pom.xml 
b/flink-test-utils-parent/flink-test-utils/pom.xml
index 875a2bf..da10cda 100644
--- a/flink-test-utils-parent/flink-test-utils/pom.xml
+++ b/flink-test-utils-parent/flink-test-utils/pom.xml
@@ -83,8 +83,31 @@ under the License.
 			<artifactId>hadoop-minikdc</artifactId>
 			<version>${minikdc.version}</version>
 			<scope>compile</scope>
+			<optional>true</optional>
 		</dependency>
 
 	</dependencies>
 
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.felix</groupId>
+				<artifactId>maven-bundle-plugin</artifactId>
+				<version>3.0.1</version>
+				<inherited>true</inherited>
+				<extensions>true</extensions>
+			</plugin>
+		</plugins>
+	</build>
+
 

http://git-wip-us.apache.org/repos/asf/flink/blob/2c08f754/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
--
diff --git 
a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
 
b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
index 10450c3..febd074 100644
--- 
a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
+++ 
b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
@@ -37,9 +37,39 @@ import java.util.Properties;
 /**
  * Helper {@link SecureTestEnvironment} to handle MiniKDC lifecycle.
  * This class can be used to start/stop MiniKDC and create secure configurations for MiniDFSCluster
 - * and MiniYarn
 + * and MiniYarn.
 + *
 + * If you use this class in your project, please make sure to add a dependency on
 + * hadoop-minikdc, e.g. in your pom.xml:
 + * <pre>{@code
 + * ...
 + * <dependencies>
 + *   <dependency>
 + *     <groupId>org.apache.hadoop</groupId>
 + *     <artifactId>hadoop-minikdc</artifactId>
 + *     <version>${minikdc.version}</version>
 + *     <scope>compile</scope>
 + *   </dependency>
 + * ...
 + * </dependencies>
 + * ...
 + *
 + * <build>
 + *   <plugins>
 + *     <plugin>
 + *       <groupId>org.apache.felix</groupId>
 + *       <artifactId>maven-bundle-plugin</artifactId>
 + *       <version>3.0.1</version>
 + *       <inherited>true</inherited>
 + *       <extensions>true</extensions>
 + *     </plugin>
 + * ...
 + * }</pre>
  */
-
 public class SecureTestEnvironment {
 
protected static final Logger LOG = 
LoggerFactory.getLogger(SecureTestEnvironment.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/2c08f754/pom.xml
--
diff --git a/pom.xml b/pom.xml
index a1bca92..b3d6483 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1113,16 +1113,6 @@ under the License.


 
-			<plugin>
-				<groupId>org.apache.felix</groupId>
-				<artifactId>maven-bundle-plugin</artifactId>
-				<version>3.0.1</version>
-				<inherited>true</inherited>
-				<extensions>true</extensions>
-			</plugin>
-

 
 



[2/2] flink git commit: [FLINK-5723] [web frontend] Use 'Used' instead of 'Initial' in TaskManager memory consumption view

2017-02-20 Thread sewen
[FLINK-5723] [web frontend] Use 'Used' instead of 'Initial' in TaskManager 
memory consumption view

This closes #3275


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/03e6c249
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/03e6c249
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/03e6c249

Branch: refs/heads/master
Commit: 03e6c249156fbbfeef39397a70c70bb905469d09
Parents: 4810937
Author: unknown 
Authored: Mon Feb 6 23:18:38 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Feb 20 12:57:47 2017 +0100

--
 .../app/partials/taskmanager/taskmanager.metrics.jade  | 2 +-
 .../web/partials/taskmanager/taskmanager.metrics.html  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/03e6c249/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
--
diff --git 
a/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
 
b/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
index b7e8fec..e348a5b 100644
--- 
a/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
+++ 
b/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
@@ -45,7 +45,7 @@ div(ng-if="metrics.id")
   tr
 th Type
 th Committed
-th Initial
+th Used
 th Maximum
 tbody
   tr

http://git-wip-us.apache.org/repos/asf/flink/blob/03e6c249/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
--
diff --git 
a/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
 
b/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
index 53aaa75..3372db8 100644
--- 
a/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
+++ 
b/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
@@ -50,7 +50,7 @@ limitations under the License.
   
 Type
 Committed
-Initial
+Used
 Maximum
   
 



[1/2] flink git commit: [FLINK-5749] [build] Unset HADOOP_HOME and HADOOP_CONF_DIR variables for tests

2017-02-20 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master f113d7945 -> 03e6c2491


[FLINK-5749] [build] Unset HADOOP_HOME and HADOOP_CONF_DIR variables for tests

This unsets the HADOOP_HOME and HADOOP_CONF_DIR environment variables for
tests, so that the tests do not pick these variables up from build servers
and produce unexpected test results.
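
All four modules patched below use the same mechanism: Maven Surefire's
per-module `environmentVariables` configuration. A minimal sketch of the idea,
assuming that overriding the variables with empty values is enough to shadow
whatever the build server exports:

```xml
<!-- Sketch only; the exact placement inside each module's pom differs. -->
<plugin>
	<groupId>org.apache.maven.plugins</groupId>
	<artifactId>maven-surefire-plugin</artifactId>
	<configuration>
		<environmentVariables>
			<!-- shadow HADOOP_HOME / HADOOP_CONF_DIR exported by the CI host -->
			<HADOOP_HOME></HADOOP_HOME>
			<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
		</environmentVariables>
	</configuration>
</plugin>
```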

This closes #3288


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/48109378
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/48109378
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/48109378

Branch: refs/heads/master
Commit: 48109378bc337736c7469de0fe08ec12f7a8f76f
Parents: f113d79
Author: wenlong.lwl 
Authored: Fri Jan 13 14:12:17 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Feb 20 12:55:28 2017 +0100

--
 .../flink-connector-filesystem/pom.xml|  6 ++
 .../flink-hadoop-compatibility/pom.xml| 13 +
 flink-fs-tests/pom.xml| 18 ++
 flink-tests/pom.xml   |  5 +
 4 files changed, 42 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/48109378/flink-connectors/flink-connector-filesystem/pom.xml
--
diff --git a/flink-connectors/flink-connector-filesystem/pom.xml 
b/flink-connectors/flink-connector-filesystem/pom.xml
index 6e0da2a..a37cb68 100644
--- a/flink-connectors/flink-connector-filesystem/pom.xml
+++ b/flink-connectors/flink-connector-filesystem/pom.xml
@@ -155,6 +155,12 @@ under the License.
-->
 						<forkCount>1</forkCount>
 						<reuseForks>false</reuseForks>
+
+						<environmentVariables>
+							<HADOOP_HOME></HADOOP_HOME>
+							<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
+						</environmentVariables>




http://git-wip-us.apache.org/repos/asf/flink/blob/48109378/flink-connectors/flink-hadoop-compatibility/pom.xml
--
diff --git a/flink-connectors/flink-hadoop-compatibility/pom.xml 
b/flink-connectors/flink-hadoop-compatibility/pom.xml
index fe25376..46e20ef 100644
--- a/flink-connectors/flink-hadoop-compatibility/pom.xml
+++ b/flink-connectors/flink-hadoop-compatibility/pom.xml
@@ -175,6 +175,19 @@ under the License.

 					<configLocation>${project.basedir}/../../tools/maven/scalastyle-config.xml</configLocation>
 				</configuration>
 			</plugin>
+
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<configuration>
+					<environmentVariables>
+						<HADOOP_HOME></HADOOP_HOME>
+						<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
+					</environmentVariables>
+				</configuration>
+			</plugin>
+


 

http://git-wip-us.apache.org/repos/asf/flink/blob/48109378/flink-fs-tests/pom.xml
--
diff --git a/flink-fs-tests/pom.xml b/flink-fs-tests/pom.xml
index c5ee856..84bba0a 100644
--- a/flink-fs-tests/pom.xml
+++ b/flink-fs-tests/pom.xml
@@ -96,4 +96,22 @@ under the License.

 			<version>${hadoop.version}</version>
 		</dependency>
 	</dependencies>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<configuration>
+					<environmentVariables>
+						<HADOOP_HOME></HADOOP_HOME>
+						<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
+					</environmentVariables>
+				</configuration>
+			</plugin>
+		</plugins>
+	</build>
+
 

http://git-wip-us.apache.org/repos/asf/flink/blob/48109378/flink-tests/pom.xml
--
diff --git a/flink-tests/pom.xml b/flink-tests/pom.xml
index e9f0c66..e5e3e89 100644
--- a/flink-tests/pom.xml
+++ b/flink-tests/pom.xml
@@ -343,6 +343,11 @@ under the License.

 						<classpathDependencyExclude>org.apache.curator:curator-framework</classpathDependencyExclude>
 					</classpathDependencyExcludes>
 					<reuseForks>false</reuseForks>
+					<environmentVariables>
+						<HADOOP_HOME></HADOOP_HOME>
+						<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
+					</environmentVariables>

[2/2] flink git commit: [FLINK-5723] [web frontend] Use 'Used' instead of 'Initial' in TaskManager memory consumption view

2017-02-20 Thread sewen
[FLINK-5723] [web frontend] Use 'Used' instead of 'Initial' in TaskManager 
memory consumption view

This closes #3275


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/e3e3c2a7
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/e3e3c2a7
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/e3e3c2a7

Branch: refs/heads/release-1.2
Commit: e3e3c2a7f9c8dd8576e0e27b2efddb7ff42c7c0d
Parents: 24aff09
Author: unknown 
Authored: Mon Feb 6 23:18:38 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Feb 20 12:58:12 2017 +0100

--
 .../app/partials/taskmanager/taskmanager.metrics.jade  | 2 +-
 .../web/partials/taskmanager/taskmanager.metrics.html  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/e3e3c2a7/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
--
diff --git 
a/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
 
b/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
index b7e8fec..e348a5b 100644
--- 
a/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
+++ 
b/flink-runtime-web/web-dashboard/app/partials/taskmanager/taskmanager.metrics.jade
@@ -45,7 +45,7 @@ div(ng-if="metrics.id")
   tr
 th Type
 th Committed
-th Initial
+th Used
 th Maximum
 tbody
   tr

http://git-wip-us.apache.org/repos/asf/flink/blob/e3e3c2a7/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
--
diff --git 
a/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
 
b/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
index 53aaa75..3372db8 100644
--- 
a/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
+++ 
b/flink-runtime-web/web-dashboard/web/partials/taskmanager/taskmanager.metrics.html
@@ -50,7 +50,7 @@ limitations under the License.
   
 Type
 Committed
-Initial
+Used
 Maximum
   
 



[1/2] flink git commit: [FLINK-5749] [build] Unset HADOOP_HOME and HADOOP_CONF_DIR variables for tests

2017-02-20 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.2 048a21c09 -> e3e3c2a7f


[FLINK-5749] [build] Unset HADOOP_HOME and HADOOP_CONF_DIR variables for tests

This unsets the HADOOP_HOME and HADOOP_CONF_DIR environment variables for
tests, so that the tests do not pick these variables up from build servers
and produce unexpected test results.

This closes #3288


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/24aff09c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/24aff09c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/24aff09c

Branch: refs/heads/release-1.2
Commit: 24aff09ce51a6e90cd6c39d89a3f2c5b7d6f4423
Parents: 048a21c
Author: wenlong.lwl 
Authored: Fri Jan 13 14:12:17 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Feb 20 12:55:44 2017 +0100

--
 .../flink-connector-filesystem/pom.xml|  6 ++
 .../flink-hadoop-compatibility/pom.xml| 13 +
 flink-fs-tests/pom.xml| 18 ++
 flink-tests/pom.xml   |  5 +
 4 files changed, 42 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/24aff09c/flink-connectors/flink-connector-filesystem/pom.xml
--
diff --git a/flink-connectors/flink-connector-filesystem/pom.xml 
b/flink-connectors/flink-connector-filesystem/pom.xml
index fbc830a..a582e6d 100644
--- a/flink-connectors/flink-connector-filesystem/pom.xml
+++ b/flink-connectors/flink-connector-filesystem/pom.xml
@@ -155,6 +155,12 @@ under the License.
-->
 						<forkCount>1</forkCount>
 						<reuseForks>false</reuseForks>
+
+						<environmentVariables>
+							<HADOOP_HOME></HADOOP_HOME>
+							<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
+						</environmentVariables>




http://git-wip-us.apache.org/repos/asf/flink/blob/24aff09c/flink-connectors/flink-hadoop-compatibility/pom.xml
--
diff --git a/flink-connectors/flink-hadoop-compatibility/pom.xml 
b/flink-connectors/flink-hadoop-compatibility/pom.xml
index 5938560..9ace671 100644
--- a/flink-connectors/flink-hadoop-compatibility/pom.xml
+++ b/flink-connectors/flink-hadoop-compatibility/pom.xml
@@ -176,6 +176,19 @@ under the License.

 					<configLocation>${project.basedir}/../../tools/maven/scalastyle-config.xml</configLocation>
 				</configuration>
 			</plugin>
+
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<configuration>
+					<environmentVariables>
+						<HADOOP_HOME></HADOOP_HOME>
+						<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
+					</environmentVariables>
+				</configuration>
+			</plugin>
+


 

http://git-wip-us.apache.org/repos/asf/flink/blob/24aff09c/flink-fs-tests/pom.xml
--
diff --git a/flink-fs-tests/pom.xml b/flink-fs-tests/pom.xml
index f480608..31fee49 100644
--- a/flink-fs-tests/pom.xml
+++ b/flink-fs-tests/pom.xml
@@ -96,4 +96,22 @@ under the License.

 			<version>${hadoop.version}</version>
 		</dependency>
 	</dependencies>
+
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<configuration>
+					<environmentVariables>
+						<HADOOP_HOME></HADOOP_HOME>
+						<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
+					</environmentVariables>
+				</configuration>
+			</plugin>
+		</plugins>
+	</build>
+
 

http://git-wip-us.apache.org/repos/asf/flink/blob/24aff09c/flink-tests/pom.xml
--
diff --git a/flink-tests/pom.xml b/flink-tests/pom.xml
index 7929e27..57ec3b6 100644
--- a/flink-tests/pom.xml
+++ b/flink-tests/pom.xml
@@ -345,6 +345,11 @@ under the License.

 						<classpathDependencyExclude>org.apache.curator:curator-framework</classpathDependencyExclude>
 					</classpathDependencyExcludes>
 					<reuseForks>false</reuseForks>
+					<environmentVariables>
+						<HADOOP_HOME></HADOOP_HOME>
+						<HADOOP_CONF_DIR></HADOOP_CONF_DIR>
+					</environmentVariables>

[2/4] flink git commit: [hotfix] [tests] Remove sysout logging in KvStateLocationTest

2017-02-24 Thread sewen
[hotfix] [tests] Remove sysout logging in KvStateLocationTest

This helps keep the test log output free of clutter.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/31f3d65c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/31f3d65c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/31f3d65c

Branch: refs/heads/master
Commit: 31f3d65c59034a9c5f40a7de34c0219792507327
Parents: f6e6e7e
Author: Stephan Ewen 
Authored: Thu Feb 23 16:51:14 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 24 12:15:18 2017 +0100

--
 .../java/org/apache/flink/runtime/query/KvStateLocationTest.java   | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/31f3d65c/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
index ed51f62..cd5c6d5 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/query/KvStateLocationTest.java
@@ -56,8 +56,6 @@ public class KvStateLocationTest {
start = end + 1;
}
 
-   System.out.println(keyGroupRanges);
-
String registrationName = "asdasdasdasd";
 
KvStateLocation location = new KvStateLocation(jobId, 
jobVertexId, numKeyGroups, registrationName);



[4/4] flink git commit: [FLINK-5895] [runtime] Decrease logging aggressiveness of FileSystemSafetyNet

2017-02-24 Thread sewen
[FLINK-5895] [runtime] Decrease logging aggressiveness of FileSystemSafetyNet


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/f6e6e7ec
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/f6e6e7ec
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/f6e6e7ec

Branch: refs/heads/master
Commit: f6e6e7ecf4d287f76698302417a9ff2ffc869477
Parents: 15ae922
Author: Stephan Ewen 
Authored: Thu Feb 23 16:21:46 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 24 12:15:18 2017 +0100

--
 .../java/org/apache/flink/core/fs/FileSystemSafetyNet.java| 7 ---
 .../main/java/org/apache/flink/runtime/taskmanager/Task.java  | 7 +++
 2 files changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/f6e6e7ec/flink-core/src/main/java/org/apache/flink/core/fs/FileSystemSafetyNet.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystemSafetyNet.java 
b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystemSafetyNet.java
index b18cb13..eb28504 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystemSafetyNet.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystemSafetyNet.java
@@ -21,9 +21,6 @@ package org.apache.flink.core.fs;
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.util.IOUtils;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.net.URI;
 
 import static org.apache.flink.util.Preconditions.checkState;
@@ -65,8 +62,6 @@ import static org.apache.flink.util.Preconditions.checkState;
 @Internal
 public class FileSystemSafetyNet {
 
-   private static final Logger LOG = 
LoggerFactory.getLogger(FileSystemSafetyNet.class);
-
/** The map from thread to the safety net registry for that thread */
	private static final ThreadLocal<SafetyNetCloseableRegistry> REGISTRIES = new ThreadLocal<>();
 
@@ -93,7 +88,6 @@ public class FileSystemSafetyNet {
 
SafetyNetCloseableRegistry newRegistry = new 
SafetyNetCloseableRegistry();
REGISTRIES.set(newRegistry);
-   LOG.info("Created new CloseableRegistry {} for {}", 
newRegistry, Thread.currentThread().getName());
}
 
/**
@@ -107,7 +101,6 @@ public class FileSystemSafetyNet {
public static void closeSafetyNetAndGuardedResourcesForThread() {
SafetyNetCloseableRegistry registry = REGISTRIES.get();
if (null != registry) {
-   LOG.info("Ensuring all FileSystem streams are closed 
for {}", Thread.currentThread().getName());
REGISTRIES.remove();
IOUtils.closeQuietly(registry);
}

http://git-wip-us.apache.org/repos/asf/flink/blob/f6e6e7ec/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java 
b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
index c9f17b8..8732c60 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
@@ -553,6 +553,7 @@ public class Task implements Runnable, TaskActions {
// 
 
// activate safety net for task thread
+   LOG.info("Creating FileSystem stream leak safety net 
for task {}", this);
FileSystemSafetyNet.initializeSafetyNetForThread();
 
// first of all, get a user-code classloader
@@ -792,6 +793,7 @@ public class Task implements Runnable, TaskActions {
removeCachedFiles(distributedCacheEntries, 
fileCache);
 
// close and de-activate safety net for task 
thread
+   LOG.info("Ensuring all FileSystem streams are 
closed for task {}", this); 

FileSystemSafetyNet.closeSafetyNetAndGuardedResourcesForThread();
 
notifyFinalState();
@@ -1138,7 +1140,9 @@ public class Task implements Runnable, TaskActions {
@Override
public void run() {
// activate safety net for 
checkpointing thread
+   LOG.debug("Creating FileSystem 
stream leak safety net for {}", Thread.currentThread().getName());


[1/4] flink git commit: [FLINK-5885] [docs] Fix Cassandra Scala snippet

2017-02-24 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 15ae922ad -> 6f3723e83


[FLINK-5885] [docs] Fix Cassandra Scala snippet

This closes #3400


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6f3723e8
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6f3723e8
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6f3723e8

Branch: refs/heads/master
Commit: 6f3723e83ff7f2b862a3f57cebbc78060937a2b8
Parents: 813c258
Author: Andrea Sella 
Authored: Thu Feb 23 10:53:56 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 24 12:15:18 2017 +0100

--
 docs/dev/connectors/cassandra.md | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/6f3723e8/docs/dev/connectors/cassandra.md
--
diff --git a/docs/dev/connectors/cassandra.md b/docs/dev/connectors/cassandra.md
index 7f76b72..c897779 100644
--- a/docs/dev/connectors/cassandra.md
+++ b/docs/dev/connectors/cassandra.md
@@ -100,12 +100,11 @@ CassandraSink.addSink(input)
 CassandraSink.addSink(input)
   .setQuery("INSERT INTO example.values (id, counter) values (?, ?);")
   .setClusterBuilder(new ClusterBuilder() {
-@Override
-public Cluster buildCluster(Cluster.Builder builder) {
-  return builder.addContactPoint("127.0.0.1").build();
+override def buildCluster(builder: Cluster.Builder): Cluster = {
+  builder.addContactPoint("127.0.0.1").build()
 }
   })
-  .build();
+  .build()
 {% endhighlight %}
 
 



[3/4] flink git commit: [FLINK-5854] [core] Add base Flink Exception classes

2017-02-24 Thread sewen
[FLINK-5854] [core] Add base Flink Exception classes

This closes #3368


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/813c2585
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/813c2585
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/813c2585

Branch: refs/heads/master
Commit: 813c2585a49c673b71678463d719b6a85b778994
Parents: 31f3d65
Author: Stephan Ewen 
Authored: Fri Feb 17 16:24:35 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 24 12:15:18 2017 +0100

--
 .../flink/util/DynamicCodeLoadingException.java | 61 
 .../org/apache/flink/util/FlinkException.java   | 58 +++
 .../flink/util/FlinkRuntimeException.java   | 58 +++
 3 files changed, 177 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/813c2585/flink-core/src/main/java/org/apache/flink/util/DynamicCodeLoadingException.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/util/DynamicCodeLoadingException.java
 
b/flink-core/src/main/java/org/apache/flink/util/DynamicCodeLoadingException.java
new file mode 100644
index 000..d18b9d3
--- /dev/null
+++ 
b/flink-core/src/main/java/org/apache/flink/util/DynamicCodeLoadingException.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.util;
+
+import org.apache.flink.annotation.Public;
+
+/**
+ * An exception that is thrown if the dynamic instantiation of code fails.
+ * 
+ * This exception is supposed to "sum up" the zoo of exceptions typically 
thrown around
+ * dynamic code loading and instantiations:
+ * 
+ * {@code
+ * try {
+ * Class.forName(classname).asSubclass(TheType.class).newInstance();
+ * }
+ * catch (ClassNotFoundException | ClassCastException | InstantiationException | IllegalAccessException e) {
+ *     throw new DynamicCodeLoadingException("Could not load and instantiate " + classname, e);
+ * }
+ * }
+ */
+@Public
+public class DynamicCodeLoadingException extends FlinkException {
+
+   private static final long serialVersionUID = -25138443817255490L;
+
+   /**
+* Creates a new exception with the given cause.
+*
+* @param cause The exception that caused this exception
+*/
+   public DynamicCodeLoadingException(Throwable cause) {
+   super(cause);
+   }
+
+   /**
+* Creates a new exception with the given message and cause.
+*
+* @param message The exception message
+* @param cause The exception that caused this exception
+*/
+   public DynamicCodeLoadingException(String message, Throwable cause) {
+   super(message, cause);
+   }
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/813c2585/flink-core/src/main/java/org/apache/flink/util/FlinkException.java
--
diff --git a/flink-core/src/main/java/org/apache/flink/util/FlinkException.java 
b/flink-core/src/main/java/org/apache/flink/util/FlinkException.java
new file mode 100644
index 000..550ab2c
--- /dev/null
+++ b/flink-core/src/main/java/org/apache/flink/util/FlinkException.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for 

[1/2] flink git commit: [FLINK-5895] [runtime] Decrease logging aggressiveness of FileSystemSafetyNet

2017-02-23 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.2 db3c5f388 -> b5ec14641


[FLINK-5895] [runtime] Decrease logging aggressiveness of FileSystemSafetyNet


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/74b29f5a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/74b29f5a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/74b29f5a

Branch: refs/heads/release-1.2
Commit: 74b29f5a3dc4f1413bbf8addb6b4234a5bfe6581
Parents: db3c5f3
Author: Stephan Ewen 
Authored: Thu Feb 23 16:21:46 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 23 16:26:01 2017 +0100

--
 .../src/main/java/org/apache/flink/core/fs/FileSystem.java   | 6 --
 .../main/java/org/apache/flink/runtime/taskmanager/Task.java | 8 
 2 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/74b29f5a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
--
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java 
b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
index d8efcbc..991c718 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
@@ -33,8 +33,6 @@ import org.apache.flink.core.fs.local.LocalFileSystem;
 import org.apache.flink.util.IOUtils;
 import org.apache.flink.util.OperatingSystem;
 import org.apache.flink.util.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
 import java.io.File;
@@ -86,8 +84,6 @@ public abstract class FileSystem {
 
private static final String HADOOP_WRAPPER_SCHEME = "hdwrapper";
 
-   private static final Logger LOG = 
LoggerFactory.getLogger(FileSystem.class);
-
/** This lock guards the methods {@link #initOutPathLocalFS(Path, 
WriteMode, boolean)} and
 * {@link #initOutPathDistFS(Path, WriteMode, boolean)} which are 
otherwise susceptible to races */
private static final ReentrantLock OUTPUT_DIRECTORY_INIT_LOCK = new 
ReentrantLock(true);
@@ -107,7 +103,6 @@ public abstract class FileSystem {
 
SafetyNetCloseableRegistry newRegistry = new 
SafetyNetCloseableRegistry();
REGISTRIES.set(newRegistry);
-   LOG.info("Created new CloseableRegistry " + newRegistry + " for 
{}", Thread.currentThread().getName());
}
 
/**
@@ -118,7 +113,6 @@ public abstract class FileSystem {
public static void 
closeAndDisposeFileSystemCloseableRegistryForThread() {
SafetyNetCloseableRegistry registry = REGISTRIES.get();
if (null != registry) {
-   LOG.info("Ensuring all FileSystem streams are closed 
for {}", Thread.currentThread().getName());
REGISTRIES.remove();
IOUtils.closeQuietly(registry);
}

http://git-wip-us.apache.org/repos/asf/flink/blob/74b29f5a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java 
b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
index ff81827..d242d7a 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
@@ -539,6 +539,7 @@ public class Task implements Runnable, TaskActions {
// 
 
// activate safety net for task thread
+   LOG.info("Creating FileSystem stream leak safety net 
for task {}", this);

FileSystem.createAndSetFileSystemCloseableRegistryForThread();
 
// first of all, get a user-code classloader
@@ -763,7 +764,9 @@ public class Task implements Runnable, TaskActions {
 
// remove all files in the distributed cache
removeCachedFiles(distributedCacheEntries, 
fileCache);
+
// close and de-activate safety net for task 
thread
+   LOG.info("Ensuring all FileSystem streams are 
closed for task {}", this);

FileSystem.closeAndDisposeFileSystemCloseableRegistryForThread();
 
notifyFinalState();
@@ -1106,7 +1109,9 @@ public class Task implements Runnable, TaskActions {
@Override

[2/2] flink git commit: [FLINK-5877] [docs] Fix Async I/O Scala snippet

2017-02-23 Thread sewen
[FLINK-5877] [docs] Fix Async I/O Scala snippet

This closes #3383


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/b5ec1464
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/b5ec1464
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/b5ec1464

Branch: refs/heads/release-1.2
Commit: b5ec146413bedf55867e15652c7e29f1e4e2d220
Parents: 74b29f5
Author: Andrea Sella 
Authored: Tue Feb 21 21:18:16 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 23 17:37:27 2017 +0100

--
 docs/dev/stream/asyncio.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/b5ec1464/docs/dev/stream/asyncio.md
--
diff --git a/docs/dev/stream/asyncio.md b/docs/dev/stream/asyncio.md
index dbf2b9c..c4414b4 100644
--- a/docs/dev/stream/asyncio.md
+++ b/docs/dev/stream/asyncio.md
@@ -139,7 +139,7 @@ class AsyncDatabaseRequest extends AsyncFunction[String, 
(String, String)] {
 lazy val client: DatabaseClient = new DatabaseClient(host, post, 
credentials)
 
 /** The context used for the future callbacks */
-implicit lazy val executor: ExecutionContext = 
ExecutionContext.fromExecutor(Executors.directExecutor()))
+implicit lazy val executor: ExecutionContext = 
ExecutionContext.fromExecutor(Executors.directExecutor())
 
 
 override def asyncInvoke(str: String, asyncCollector: 
AsyncCollector[(String, String)]): Unit = {
@@ -150,8 +150,8 @@ class AsyncDatabaseRequest extends AsyncFunction[String, 
(String, String)] {
 // set the callback to be executed once the request by the client is 
complete
 // the callback simply forwards the result to the collector
 resultFuture.onSuccess {
-case result: String => 
asyncCollector.collect(Collections.singleton((str, result)));
-})
+case result: String => asyncCollector.collect(Iterable((str, 
result)));
+}
 }
 }
 



flink git commit: [FLINK-5885] [docs] Fix Cassandra Scala snippet

2017-02-23 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.2 b5ec14641 -> d8c2447bd


[FLINK-5885] [docs] Fix Cassandra Scala snippet

This closes #3400


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d8c2447b
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d8c2447b
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d8c2447b

Branch: refs/heads/release-1.2
Commit: d8c2447bdc0614d02fcdb01eb33f5bab7b26a8c5
Parents: b5ec146
Author: Andrea Sella 
Authored: Thu Feb 23 10:53:56 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 23 17:45:11 2017 +0100

--
 docs/dev/connectors/cassandra.md | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/d8c2447b/docs/dev/connectors/cassandra.md
--
diff --git a/docs/dev/connectors/cassandra.md b/docs/dev/connectors/cassandra.md
index 7f76b72..c897779 100644
--- a/docs/dev/connectors/cassandra.md
+++ b/docs/dev/connectors/cassandra.md
@@ -100,12 +100,11 @@ CassandraSink.addSink(input)
 CassandraSink.addSink(input)
   .setQuery("INSERT INTO example.values (id, counter) values (?, ?);")
   .setClusterBuilder(new ClusterBuilder() {
-@Override
-public Cluster buildCluster(Cluster.Builder builder) {
-  return builder.addContactPoint("127.0.0.1").build();
+override def buildCluster(builder: Cluster.Builder): Cluster = {
+  builder.addContactPoint("127.0.0.1").build()
 }
   })
-  .build();
+  .build()
 {% endhighlight %}
 
 



[6/7] flink git commit: [FLINK-5887] [checkpointing] Make CheckpointBarrier type immutable.

2017-02-23 Thread sewen
[FLINK-5887] [checkpointing] Make CheckpointBarrier type immutable.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8ffe75a5
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8ffe75a5
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8ffe75a5

Branch: refs/heads/master
Commit: 8ffe75a54f24cbd8e69c455b42a4e969b943a279
Parents: df16e50
Author: Stephan Ewen 
Authored: Wed Feb 22 15:04:46 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 23 18:39:49 2017 +0100

--
 .../runtime/checkpoint/CheckpointOptions.java   |  2 +-
 .../io/network/api/CheckpointBarrier.java   | 66 +++-
 .../io/network/api/CheckpointBarrierTest.java   | 40 ++--
 .../api/serialization/EventSerializerTest.java  | 13 ++--
 4 files changed, 46 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/8ffe75a5/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointOptions.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointOptions.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointOptions.java
index cb98d10..676cf3b 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointOptions.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointOptions.java
@@ -46,7 +46,7 @@ public class CheckpointOptions implements Serializable {
 
private CheckpointOptions(
@Nonnull CheckpointType checkpointType,
-   String targetLocation) {
+   @Nullable  String targetLocation) {
this.checkpointType = checkNotNull(checkpointType);
this.targetLocation = targetLocation;
}

http://git-wip-us.apache.org/repos/asf/flink/blob/8ffe75a5/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/CheckpointBarrier.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/CheckpointBarrier.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/CheckpointBarrier.java
index a42c25d..97ad90f 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/CheckpointBarrier.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/CheckpointBarrier.java
@@ -18,18 +18,14 @@
 
 package org.apache.flink.runtime.io.network.api;
 
-import static org.apache.flink.util.Preconditions.checkElementIndex;
 import static org.apache.flink.util.Preconditions.checkNotNull;
-import static org.apache.flink.util.Preconditions.checkState;
 
 import java.io.IOException;
 
 import org.apache.flink.core.memory.DataInputView;
 import org.apache.flink.core.memory.DataOutputView;
 import org.apache.flink.runtime.checkpoint.CheckpointOptions;
-import org.apache.flink.runtime.checkpoint.CheckpointOptions.CheckpointType;
 import org.apache.flink.runtime.event.RuntimeEvent;
-import org.apache.flink.util.StringUtils;
 
 /**
  * Checkpoint barriers are used to align checkpoints throughout the streaming 
topology. The
@@ -48,11 +44,9 @@ import org.apache.flink.util.StringUtils;
  */
 public class CheckpointBarrier extends RuntimeEvent {
 
-   private long id;
-   private long timestamp;
-   private CheckpointOptions checkpointOptions;
-
-   public CheckpointBarrier() {}
+   private final long id;
+   private final long timestamp;
+   private final CheckpointOptions checkpointOptions;
 
public CheckpointBarrier(long id, long timestamp, CheckpointOptions 
checkpointOptions) {
this.id = id;
@@ -75,66 +69,48 @@ public class CheckpointBarrier extends RuntimeEvent {
// 

// Serialization
// 

-   
+
+   //
+	//  These methods are inherited from the generic serialization of AbstractEvent
+   //  but would require the CheckpointBarrier to be mutable. Since all 
serialization
+   //  for events goes through the EventSerializer class, which has 
special serialization
+   //  for the CheckpointBarrier, we don't need these methods
+   // 
+
@Override
public void write(DataOutputView out) throws IOException {
-   out.writeLong(id);
-   out.writeLong(timestamp);
-   CheckpointType checkpointType = 
checkpointOptions.getCheckpointType();
-
-   out.writeInt(checkpointType.ordinal());
-
-   if 

[1/7] flink git commit: [hotfix] [docs] Fix JavaDoc errors in 'flink-streaming-java'

2017-02-23 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 30c9e2b68 -> 417597fbf


[hotfix] [docs] Fix JavaDoc errors in 'flink-streaming-java'


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/1f9f38bf
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/1f9f38bf
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/1f9f38bf

Branch: refs/heads/master
Commit: 1f9f38bf6312529af3ac527bf2f80f2ecee4d62b
Parents: 30c9e2b
Author: Stephan Ewen 
Authored: Wed Feb 22 12:50:16 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 23 17:44:25 2017 +0100

--
 .../api/datastream/AllWindowedStream.java   |  4 +--
 .../api/datastream/WindowedStream.java  |  4 +--
 .../environment/StreamExecutionEnvironment.java |  7 +++---
 .../api/functions/async/AsyncFunction.java  | 26 ++--
 .../api/operators/StreamSourceContexts.java |  6 ++---
 .../api/operators/async/AsyncWaitOperator.java  | 11 +
 .../operators/windowing/MergingWindowSet.java   |  2 +-
 7 files changed, 30 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/1f9f38bf/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
index c3c7424..4f4546e 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
@@ -826,7 +826,7 @@ public class AllWindowedStream<T, W extends Window> {
 * @param function The window function.
 * @return The data stream that is the result of applying the window 
function to the window.
 *
-* @deprecated Use {@link #fold(R, FoldFunction, AllWindowFunction)} 
instead.
+* @deprecated Use {@link #fold(Object, FoldFunction, 
AllWindowFunction)} instead.
 */
@Deprecated
	public <R> SingleOutputStreamOperator<R> apply(R initialValue, FoldFunction<T, R> foldFunction, AllWindowFunction<R, R, W> function) {
@@ -851,7 +851,7 @@ public class AllWindowedStream<T, W extends Window> {
 * @param resultType Type information for the result type of the window 
function
 * @return The data stream that is the result of applying the window 
function to the window.
 *
-* @deprecated Use {@link #fold(R, FoldFunction, AllWindowFunction, 
TypeInformation, TypeInformation)} instead.
+* @deprecated Use {@link #fold(Object, FoldFunction, 
AllWindowFunction, TypeInformation, TypeInformation)} instead.
 */
@Deprecated
	public <R> SingleOutputStreamOperator<R> apply(R initialValue, FoldFunction<T, R> foldFunction, AllWindowFunction<R, R, W> function, TypeInformation<R> resultType) {

http://git-wip-us.apache.org/repos/asf/flink/blob/1f9f38bf/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
index 6809df0..b28434c 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
@@ -1284,7 +1284,7 @@ public class WindowedStream<T, K, W extends Window> {
 * @param function The window function.
 * @return The data stream that is the result of applying the window 
function to the window.
 *
-* @deprecated Use {@link #fold(R, FoldFunction, WindowFunction)} 
instead.
+* @deprecated Use {@link #fold(Object, FoldFunction, WindowFunction)} 
instead.
 */
@Deprecated
	public <R> SingleOutputStreamOperator<R> apply(R initialValue, FoldFunction<T, R> foldFunction, WindowFunction<R, R, K, W> function) {
@@ -1309,7 +1309,7 @@ public class WindowedStream<T, K, W extends Window> {
 * @param resultType Type information for the result type of the window 
function
 * @return The data stream that is the result of applying the window 
function to the window.
 *
-* @deprecated Use {@link #fold(R, FoldFunction, WindowFunction, 
TypeInformation, TypeInformation)} instead.
+* @deprecated Use {@link #fold(Object, FoldFunction, WindowFunction, TypeInformation, TypeInformation)} instead.

[3/7] flink git commit: [FLINK-5763] [checkpoints] Add CheckpointOptions

2017-02-23 Thread sewen
http://git-wip-us.apache.org/repos/asf/flink/blob/6e7a9174/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferAlignmentLimitTest.java
--
diff --git 
a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferAlignmentLimitTest.java
 
b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferAlignmentLimitTest.java
index 46f228a..e407443 100644
--- 
a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferAlignmentLimitTest.java
+++ 
b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferAlignmentLimitTest.java
@@ -21,6 +21,7 @@ package org.apache.flink.streaming.runtime.io;
 import org.apache.flink.core.memory.MemorySegment;
 import org.apache.flink.core.memory.MemorySegmentFactory;
 import org.apache.flink.runtime.checkpoint.CheckpointMetrics;
+import org.apache.flink.runtime.checkpoint.CheckpointOptions;
 import 
org.apache.flink.runtime.checkpoint.decline.AlignmentLimitExceededException;
 import org.apache.flink.runtime.checkpoint.CheckpointMetaData;
 import org.apache.flink.runtime.io.disk.iomanager.IOManager;
@@ -155,7 +156,9 @@ public class BarrierBufferAlignmentLimitTest {
check(sequence[21], buffer.getNextNonBlocked());
 
// no call for a completed checkpoint must have happened
-   verify(toNotify, 
times(0)).triggerCheckpointOnBarrier(any(CheckpointMetaData.class),
+   verify(toNotify, times(0)).triggerCheckpointOnBarrier(
+   any(CheckpointMetaData.class),
+   any(CheckpointOptions.class),
any(CheckpointMetrics.class));
 
assertNull(buffer.getNextNonBlocked());
@@ -242,7 +245,8 @@ public class BarrierBufferAlignmentLimitTest {
// checkpoint 4 completed - check and validate buffered replay
check(sequence[9], buffer.getNextNonBlocked());
validateAlignmentTime(startTs, buffer);
-   verify(toNotify, 
times(1)).triggerCheckpointOnBarrier(argThat(new CheckpointMatcher(4L)), 
any(CheckpointMetrics.class));
+   verify(toNotify, times(1)).triggerCheckpointOnBarrier(
+   argThat(new CheckpointMatcher(4L)), 
any(CheckpointOptions.class), any(CheckpointMetrics.class));
 
check(sequence[10], buffer.getNextNonBlocked());
check(sequence[15], buffer.getNextNonBlocked());
@@ -254,7 +258,8 @@ public class BarrierBufferAlignmentLimitTest {
check(sequence[21], buffer.getNextNonBlocked());
 
// only checkpoint 4 was successfully completed, not checkpoint 
3
-   verify(toNotify, 
times(0)).triggerCheckpointOnBarrier(argThat(new CheckpointMatcher(3L)), 
any(CheckpointMetrics.class));
+   verify(toNotify, times(0)).triggerCheckpointOnBarrier(
+   argThat(new CheckpointMatcher(3L)), 
any(CheckpointOptions.class), any(CheckpointMetrics.class));
 
assertNull(buffer.getNextNonBlocked());
assertNull(buffer.getNextNonBlocked());
@@ -284,7 +289,7 @@ public class BarrierBufferAlignmentLimitTest {
}
 
private static BufferOrEvent createBarrier(long id, int channel) {
-   return new BufferOrEvent(new CheckpointBarrier(id, 
System.currentTimeMillis()), channel);
+   return new BufferOrEvent(new CheckpointBarrier(id, 
System.currentTimeMillis(), CheckpointOptions.forFullCheckpoint()), channel);
}
 
private static void check(BufferOrEvent expected, BufferOrEvent 
present) {

http://git-wip-us.apache.org/repos/asf/flink/blob/6e7a9174/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferMassiveRandomTest.java
--
diff --git 
a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferMassiveRandomTest.java
 
b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferMassiveRandomTest.java
index 0cf866a..6e088f6 100644
--- 
a/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferMassiveRandomTest.java
+++ 
b/flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/io/BarrierBufferMassiveRandomTest.java
@@ -18,6 +18,7 @@
 package org.apache.flink.streaming.runtime.io;
 
 import org.apache.flink.core.memory.MemoryType;
+import org.apache.flink.runtime.checkpoint.CheckpointOptions;
 import org.apache.flink.runtime.event.TaskEvent;
 import org.apache.flink.runtime.io.disk.iomanager.IOManager;
 import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;
@@ -151,7 +152,7 @@ public class BarrierBufferMassiveRandomTest {
 
if (barrierGens[currentChannel].isNextBarrier()) {
 

[5/7] flink git commit: [FLINK-5763] [checkpoints] Add CheckpointOptions

2017-02-23 Thread sewen
[FLINK-5763] [checkpoints] Add CheckpointOptions

Adds `CheckpointOptions` to the triggered checkpoint messages (coordinator
to barrier-injecting tasks) and to the barriers flowing inline with the data:

```java
public class CheckpointOptions {

  // Type of checkpoint
  // => FULL_CHECKPOINT
  // => SAVEPOINT
  @NonNull
  CheckpointType getCheckpointType();

  // Custom target location. This is a String, because for future
  // backends it can be a logical location like a DB table.
  @Nullable
  String getTargetLocation();

}
```

This class would be the place to define more options for performing the
checkpoints (for example for incremental checkpoints).

These options are forwarded via the `StreamTask` to the `StreamOperator`s and
`Snapshotable` backends. The `AbstractStreamOperator` checks the options and
either i) forwards the shared per-operator `CheckpointStreamFactory` (for full
checkpoints) or ii) obtains a dedicated savepoint stream factory for the
custom target location.

For this, the state backends provide the following new method:

```
CheckpointStreamFactory createSavepointStreamFactory(JobID, String, String);
```

The `MemoryStateBackend` returns the regular stream factory and the
`FsStateBackend` returns a `FsSavepointStreamFactory`, which writes all
checkpoint streams to a single directory (instead of the regular sub folders
per checkpoint).

We end up with the following directory layout for savepoints:

```
+--------------------------------+
| :root_savepoint_directory      |  (custom per savepoint or configured default via `state.savepoints.dir`)
+--------------------------------+
   |
   +--+----------------------------------------+
      | savepoint-:jobId(0, 6)-:random_suffix  |  (one directory per savepoint)
      +----------------------------------------+
         |
         +- _metadata  (one per savepoint)
         +- :uuid      (one data file per StreamTask)
         +- ...
         +- :uuid
```
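
Taken together, a sketch of how a task-side snapshot path can branch on these
options. Method names follow the commit message and the hunks below;
`backend`, `jobId` and `operatorIdentifier` are illustrative placeholders:

```java
import java.io.IOException;

import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.checkpoint.CheckpointOptions.CheckpointType;
import org.apache.flink.runtime.state.AbstractStateBackend;
import org.apache.flink.runtime.state.CheckpointStreamFactory;

class StreamFactorySelection {

	private final AbstractStateBackend backend;
	private final JobID jobId;
	private final String operatorIdentifier;

	StreamFactorySelection(AbstractStateBackend backend, JobID jobId, String operatorIdentifier) {
		this.backend = backend;
		this.jobId = jobId;
		this.operatorIdentifier = operatorIdentifier;
	}

	CheckpointStreamFactory factoryFor(CheckpointOptions options) throws IOException {
		if (options.getCheckpointType() == CheckpointType.SAVEPOINT) {
			// savepoint: all streams go to the custom target location
			return backend.createSavepointStreamFactory(jobId, operatorIdentifier, options.getTargetLocation());
		} else {
			// full checkpoint: regular per-checkpoint sub-folders
			return backend.createStreamFactory(jobId, operatorIdentifier);
		}
	}
}
```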


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6e7a9174
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6e7a9174
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6e7a9174

Branch: refs/heads/master
Commit: 6e7a91741708a2b167a2bbca5dda5b2059df5e18
Parents: 1f9f38b
Author: Ufuk Celebi 
Authored: Thu Feb 16 17:56:23 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 23 18:39:49 2017 +0100

--
 .../connectors/fs/RollingSinkITCase.java|   1 -
 .../state/RocksDBKeyedStateBackend.java |   5 +-
 .../streaming/state/RocksDBStateBackend.java|   9 ++
 .../state/RocksDBAsyncSnapshotTest.java |   8 +-
 .../state/RocksDBStateBackendTest.java  |  15 +-
 .../checkpoint/CheckpointCoordinator.java   |  56 ++--
 .../runtime/checkpoint/CheckpointOptions.java   | 108 +++
 .../runtime/checkpoint/CompletedCheckpoint.java |   2 +-
 .../runtime/checkpoint/PendingCheckpoint.java   |   3 +-
 .../checkpoint/savepoint/SavepointStore.java| 137 +--
 .../flink/runtime/executiongraph/Execution.java |   6 +-
 .../io/network/api/CheckpointBarrier.java   |  44 +-
 .../api/serialization/EventSerializer.java  |  59 +++-
 .../runtime/jobgraph/tasks/StatefulTask.java|   7 +-
 .../slots/ActorTaskManagerGateway.java  |   6 +-
 .../jobmanager/slots/TaskManagerGateway.java|   5 +-
 .../jobmaster/RpcTaskManagerGateway.java|   3 +-
 .../messages/checkpoint/TriggerCheckpoint.java  |  19 ++-
 .../state/AbstractKeyedStateBackend.java|   3 +-
 .../runtime/state/AbstractStateBackend.java |   8 ++
 .../state/DefaultOperatorStateBackend.java  |   8 +-
 .../flink/runtime/state/Snapshotable.java   |   5 +-
 .../flink/runtime/state/StateBackend.java   |  22 +++
 .../filesystem/FsCheckpointStreamFactory.java   |  21 +--
 .../filesystem/FsSavepointStreamFactory.java|  58 
 .../state/filesystem/FsStateBackend.java|   9 ++
 .../state/heap/HeapKeyedStateBackend.java   |   4 +-
 .../state/memory/MemoryStateBackend.java|   9 ++
 .../runtime/taskexecutor/TaskExecutor.java  |   5 +-
 .../taskexecutor/TaskExecutorGateway.java   |   4 +-
 .../apache/flink/runtime/taskmanager/Task.java  |  10 +-
 .../flink/runtime/jobmanager/JobManager.scala   |   2 +-
 .../flink/runtime/taskmanager/TaskManager.scala |   3 +-
 .../checkpoint/CheckpointCoordinatorTest.java   |  53 ---
 .../checkpoint/CheckpointOptionsTest.java   |  48 +++
 .../checkpoint/CheckpointStatsHistoryTest.java  |   1 +
 .../savepoint/MigrationV0ToV1Test.java  |   2 +-
 .../savepoint/SavepointLoaderTest.java  |   4 +-
 .../savepoint/SavepointStoreTest.java   |  48 +--
 .../io/network/api/CheckpointBarrierTest.java   |  61 +
 .../api/serialization/EventSerializerTest.java  |  45 --
 .../io/network/api/writer/RecordWriterTest.java |   5 +-
 .../jobmanager/JobManagerHARecoveryTest.java|   5 +-
 

[4/7] flink git commit: [FLINK-5763] [checkpoints] Add CheckpointOptions

2017-02-23 Thread sewen
http://git-wip-us.apache.org/repos/asf/flink/blob/6e7a9174/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
index c2ada3b..d8e46fa 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorTest.java
@@ -313,8 +313,8 @@ public class CheckpointCoordinatorTest {
assertFalse(checkpoint.isFullyAcknowledged());
 
// check that the vertices received the trigger 
checkpoint message
-   
verify(vertex1.getCurrentExecutionAttempt()).triggerCheckpoint(checkpointId, 
timestamp);
-   
verify(vertex2.getCurrentExecutionAttempt()).triggerCheckpoint(checkpointId, 
timestamp);
+   
verify(vertex1.getCurrentExecutionAttempt()).triggerCheckpoint(checkpointId, 
timestamp, CheckpointOptions.forFullCheckpoint());
+   
verify(vertex2.getCurrentExecutionAttempt()).triggerCheckpoint(checkpointId, 
timestamp, CheckpointOptions.forFullCheckpoint());
 
CheckpointMetaData checkpointMetaData = new 
CheckpointMetaData(checkpointId, 0L);
 
@@ -428,14 +428,14 @@ public class CheckpointCoordinatorTest {
 
// check that the vertices received the trigger 
checkpoint message
{
-   verify(vertex1.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpoint1Id), eq(timestamp));
-   verify(vertex2.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpoint1Id), eq(timestamp));
+   verify(vertex1.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpoint1Id), eq(timestamp), 
any(CheckpointOptions.class));
+   verify(vertex2.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpoint1Id), eq(timestamp), 
any(CheckpointOptions.class));
}
 
// check that the vertices received the trigger 
checkpoint message for the second checkpoint
{
-   verify(vertex1.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpoint2Id), eq(timestamp + 2));
-   verify(vertex2.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpoint2Id), eq(timestamp + 2));
+   verify(vertex1.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpoint2Id), eq(timestamp + 2), 
any(CheckpointOptions.class));
+   verify(vertex2.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpoint2Id), eq(timestamp + 2), 
any(CheckpointOptions.class));
}
 
// decline checkpoint from one of the tasks, this 
should cancel the checkpoint
@@ -529,8 +529,8 @@ public class CheckpointCoordinatorTest {
 
// check that the vertices received the trigger 
checkpoint message
{
-   verify(vertex1.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpointId), eq(timestamp));
-   verify(vertex2.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpointId), eq(timestamp));
+   verify(vertex1.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpointId), eq(timestamp), 
any(CheckpointOptions.class));
+   verify(vertex2.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpointId), eq(timestamp), 
any(CheckpointOptions.class));
}
 
// acknowledge from one of the tasks
@@ -558,8 +558,8 @@ public class CheckpointCoordinatorTest {
 
// validate that the relevant tasks got a confirmation 
message
{
-   verify(vertex1.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpointId), eq(timestamp));
-   verify(vertex2.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpointId), eq(timestamp));
+   verify(vertex1.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpointId), eq(timestamp), 
any(CheckpointOptions.class));
+   verify(vertex2.getCurrentExecutionAttempt(), 
times(1)).triggerCheckpoint(eq(checkpointId), eq(timestamp), 
any(CheckpointOptions.class));
   

[7/7] flink git commit: [hotfix] [checkpoints] Remove equals()/hashCode() from CompletedCheckpoint as semantic equality is not well defined.

2017-02-23 Thread sewen
[hotfix] [checkpoints] Remove equals()/hashCode() from CompletedCheckpoint as 
semantic equality is not well defined.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/417597fb
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/417597fb
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/417597fb

Branch: refs/heads/master
Commit: 417597fbf71ac9062bed1abf04139d46ec830ec4
Parents: 8ffe75a
Author: Stephan Ewen 
Authored: Wed Feb 22 22:19:11 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 23 18:39:50 2017 +0100

--
 .../runtime/checkpoint/CompletedCheckpoint.java | 24 +---
 1 file changed, 1 insertion(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/417597fb/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpoint.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpoint.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpoint.java
index 53d888e..db86484 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpoint.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpoint.java
@@ -23,13 +23,13 @@ import 
org.apache.flink.runtime.checkpoint.savepoint.SavepointStore;
 import org.apache.flink.runtime.jobgraph.JobStatus;
 import org.apache.flink.runtime.jobgraph.JobVertexID;
 import org.apache.flink.runtime.state.StateUtil;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
 import java.io.Serializable;
 import java.util.Map;
-import java.util.Objects;
 
 import static org.apache.flink.util.Preconditions.checkArgument;
 import static org.apache.flink.util.Preconditions.checkNotNull;
@@ -206,29 +206,7 @@ public class CompletedCheckpoint implements Serializable {
// 

 
@Override
-   public boolean equals(Object obj) {
-   if (obj instanceof CompletedCheckpoint) {
-   CompletedCheckpoint other = (CompletedCheckpoint) obj;
-
-   return job.equals(other.job) && checkpointID == 
other.checkpointID &&
-   timestamp == other.timestamp && duration == 
other.duration &&
-   taskStates.equals(other.taskStates);
-   } else {
-   return false;
-   }
-   }
-
-   @Override
-   public int hashCode() {
-   return (int) (this.checkpointID ^ this.checkpointID >>> 32) +
-   31 * ((int) (this.timestamp ^ this.timestamp >>> 32) +
-   31 * ((int) (this.duration ^ this.duration >>> 
32) +
-   31 * Objects.hash(job, taskStates)));
-   }
-
-   @Override
public String toString() {
return String.format("Checkpoint %d @ %d for %s", checkpointID, 
timestamp, job);
}
-
 }



[10/19] flink git commit: [FLINK-4813] [test-utils] Make the hadoop-minikdc dependency optional

2017-02-19 Thread sewen
[FLINK-4813] [test-utils] Make the hadoop-minikdc dependency optional

With this change, any project using flink-test-utils that also requires the
SecureTestEnvironment must add a dependency on hadoop-minikdc itself, e.g. in
its pom.xml:

   ...
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minikdc</artifactId>
       <version>${minikdc.version}</version>
       <scope>compile</scope>
     </dependency>
     ...
   </dependencies>
   ...

   <build>
     <plugins>
       <plugin>
         <groupId>org.apache.felix</groupId>
         <artifactId>maven-bundle-plugin</artifactId>
         <version>3.0.1</version>
         <inherited>true</inherited>
         <extensions>true</extensions>
       </plugin>
       ...
     </plugins>
   </build>

This closes #3322


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/391efd35
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/391efd35
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/391efd35

Branch: refs/heads/master
Commit: 391efd35bffe3f5796cd655ae1598f102a2e8fb7
Parents: 1ceb7d8
Author: Nico Kruber 
Authored: Wed Feb 15 14:24:32 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:23 2017 +0100

--
 .../flink-test-utils/pom.xml| 23 +
 .../flink/test/util/SecureTestEnvironment.java  | 34 ++--
 pom.xml | 10 --
 3 files changed, 55 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/391efd35/flink-test-utils-parent/flink-test-utils/pom.xml
--
diff --git a/flink-test-utils-parent/flink-test-utils/pom.xml 
b/flink-test-utils-parent/flink-test-utils/pom.xml
index b986056..069f1da 100644
--- a/flink-test-utils-parent/flink-test-utils/pom.xml
+++ b/flink-test-utils-parent/flink-test-utils/pom.xml
@@ -83,8 +83,31 @@ under the License.
hadoop-minikdc
${minikdc.version}
compile
+   
+   true

 

 
+   
+   
+   
+   
+   org.apache.felix
+   maven-bundle-plugin
+   3.0.1
+   true
+   true
+   
+   
+   
+
 

http://git-wip-us.apache.org/repos/asf/flink/blob/391efd35/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
--
diff --git 
a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
 
b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
index 10450c3..febd074 100644
--- 
a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
+++ 
b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/SecureTestEnvironment.java
@@ -37,9 +37,39 @@ import java.util.Properties;
 /**
  * Helper {@link SecureTestEnvironment} to handle MiniKDC lifecycle.
  * This class can be used to start/stop MiniKDC and create secure 
configurations for MiniDFSCluster
- * and MiniYarn
+ * and MiniYarn.
+ *
+ * If you use this class in your project, please make sure to add a dependency 
to
+ * hadoop-minikdc, e.g. in your pom.xml:
+ * {@code
+ * ...
+ * 
+ *   
+ * org.apache.hadoop
+ * hadoop-minikdc
+ * ${minikdc.version}
+ * compile
+ *   
+ * ...
+ * 
+ * ...
+ *
+ * 
+ *   
+ * 
+ * 
+ * org.apache.felix
+ * maven-bundle-plugin
+ * 3.0.1
+ * true
+ * true
+ *   
+ * ...
+ * }
  */
-
 public class SecureTestEnvironment {
 
protected static final Logger LOG = 
LoggerFactory.getLogger(SecureTestEnvironment.class);

http://git-wip-us.apache.org/repos/asf/flink/blob/391efd35/pom.xml
--
diff --git a/pom.xml b/pom.xml
index e6fae81..d81296e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1120,16 +1120,6 @@ under the License.


 
-   
-   
-   org.apache.felix
-   maven-bundle-plugin
-   3.0.1
-   true
-   true
-   
-

 
 



[03/19] flink git commit: [FLINK-5497] [tests] Remove duplicated tests for hash tables

2017-02-19 Thread sewen
[FLINK-5497] [tests] Remove duplicated tests for hash tables

This closes #3089


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/53134594
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/53134594
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/53134594

Branch: refs/heads/master
Commit: 53134594644407d0a3cd691b0e93ae09ff6c8102
Parents: 9f544d8
Author: Alexey Diomin 
Authored: Tue Jan 10 22:04:41 2017 +0400
Committer: Stephan Ewen 
Committed: Sat Feb 18 19:19:34 2017 +0100

--
 .../NonReusingReOpenableHashTableITCase.java| 421 +-
 .../hash/ReOpenableHashTableITCase.java | 222 ++
 .../hash/ReOpenableHashTableTestBase.java   | 193 +
 .../hash/ReusingReOpenableHashTableITCase.java  | 429 +--
 4 files changed, 429 insertions(+), 836 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/53134594/flink-runtime/src/test/java/org/apache/flink/runtime/operators/hash/NonReusingReOpenableHashTableITCase.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/operators/hash/NonReusingReOpenableHashTableITCase.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/operators/hash/NonReusingReOpenableHashTableITCase.java
index 576cbd4..6b4e170 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/operators/hash/NonReusingReOpenableHashTableITCase.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/operators/hash/NonReusingReOpenableHashTableITCase.java
@@ -19,190 +19,34 @@
 
 package org.apache.flink.runtime.operators.hash;
 
-import org.apache.flink.api.common.typeutils.TypeComparator;
-import org.apache.flink.api.common.typeutils.TypePairComparator;
-import org.apache.flink.api.common.typeutils.TypeSerializer;
-import org.apache.flink.core.memory.MemorySegment;
-import org.apache.flink.core.memory.MemoryType;
-import org.apache.flink.runtime.io.disk.iomanager.IOManager;
-import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;
-import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;
-import org.apache.flink.runtime.memory.MemoryAllocationException;
-import org.apache.flink.runtime.memory.MemoryManager;
 import 
org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.TupleMatch;
 import 
org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.TupleMatchRemovingJoin;
 import org.apache.flink.runtime.operators.testutils.DiscardingOutputCollector;
-import org.apache.flink.runtime.operators.testutils.DummyInvokable;
 import org.apache.flink.runtime.operators.testutils.TestData;
 import org.apache.flink.runtime.operators.testutils.TestData.TupleGenerator;
-import 
org.apache.flink.runtime.operators.testutils.TestData.TupleGenerator.KeyMode;
-import 
org.apache.flink.runtime.operators.testutils.TestData.TupleGenerator.ValueMode;
-import org.apache.flink.runtime.operators.testutils.UnionIterator;
 import org.apache.flink.util.Collector;
-import org.apache.flink.util.MutableObjectIterator;
-import org.junit.After;
 import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import org.apache.flink.api.common.functions.FlatJoinFunction;
-import org.apache.flink.api.common.typeutils.GenericPairComparator;
 import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.runtime.operators.testutils.UniformIntTupleGenerator;
 
-import static org.junit.Assert.fail;
+import static 
org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.joinTuples;
+import static 
org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.collectTupleData;
 
 /**
  * Test specialized hash join that keeps the build side data (in memory and on 
hard disk)
  * This is used for iterative tasks.
  */
-public class NonReusingReOpenableHashTableITCase {
+public class NonReusingReOpenableHashTableITCase extends 
ReOpenableHashTableTestBase {
 
-   private static final int PAGE_SIZE = 8 * 1024;
-   private static final long MEMORY_SIZE = PAGE_SIZE * 1000; // 100 Pages.
-
-   private static final long SEED1 = 561349061987311L;
-   private static final long SEED2 = 231434613412342L;
-
-   private static final int NUM_PROBES = 3; // number of reopenings of 
hash join
-
-   private final AbstractInvokable parentTask = new DummyInvokable();
-
-   private IOManager ioManager;
-   private MemoryManager memoryManager;
-
-   private TypeSerializer> 

[17/19] flink git commit: [hotfix] [core] Add missing @Internal annotations to classes in flink-core.

2017-02-19 Thread sewen
[hotfix] [core] Add missing @Internal annotations to classes in flink-core.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/082d40fd
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/082d40fd
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/082d40fd

Branch: refs/heads/master
Commit: 082d40fdc415952737a109c913a86d009ed234c1
Parents: 0aa9918
Author: Fabian Hueske 
Authored: Wed Feb 15 15:25:15 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:24 2017 +0100

--
 .../org/apache/flink/api/common/ArchivedExecutionConfig.java| 3 +++
 .../api/common/typeutils/TypeSerializerSerializationProxy.java  | 4 +++-
 .../apache/flink/api/common/typeutils/base/ListSerializer.java  | 2 ++
 .../apache/flink/api/java/typeutils/runtime/AvroSerializer.java | 2 ++
 .../api/java/typeutils/runtime/CopyableValueComparator.java | 2 ++
 .../api/java/typeutils/runtime/CopyableValueSerializer.java | 2 ++
 .../flink/api/java/typeutils/runtime/DataInputDecoder.java  | 3 ++-
 .../flink/api/java/typeutils/runtime/DataInputViewStream.java   | 2 ++
 .../flink/api/java/typeutils/runtime/DataOutputEncoder.java | 3 ++-
 .../flink/api/java/typeutils/runtime/DataOutputViewStream.java  | 2 ++
 .../flink/api/java/typeutils/runtime/EitherSerializer.java  | 2 ++
 .../flink/api/java/typeutils/runtime/FieldSerializer.java   | 3 +++
 .../flink/api/java/typeutils/runtime/GenericTypeComparator.java | 2 ++
 .../flink/api/java/typeutils/runtime/NoFetchingInput.java   | 2 ++
 .../flink/api/java/typeutils/runtime/NullAwareComparator.java   | 2 ++
 .../apache/flink/api/java/typeutils/runtime/NullMaskUtils.java  | 2 ++
 .../apache/flink/api/java/typeutils/runtime/PojoComparator.java | 3 ++-
 .../apache/flink/api/java/typeutils/runtime/PojoSerializer.java | 2 ++
 .../apache/flink/api/java/typeutils/runtime/RowComparator.java  | 2 ++
 .../apache/flink/api/java/typeutils/runtime/RowSerializer.java  | 2 ++
 .../api/java/typeutils/runtime/RuntimeComparatorFactory.java| 2 ++
 .../java/typeutils/runtime/RuntimePairComparatorFactory.java| 2 ++
 .../api/java/typeutils/runtime/RuntimeSerializerFactory.java| 2 ++
 .../flink/api/java/typeutils/runtime/Tuple0Serializer.java  | 2 ++
 .../flink/api/java/typeutils/runtime/TupleComparator.java   | 3 ++-
 .../flink/api/java/typeutils/runtime/TupleComparatorBase.java   | 3 ++-
 .../flink/api/java/typeutils/runtime/TupleSerializer.java   | 3 ++-
 .../flink/api/java/typeutils/runtime/TupleSerializerBase.java   | 2 ++
 .../flink/api/java/typeutils/runtime/ValueComparator.java   | 2 ++
 .../flink/api/java/typeutils/runtime/ValueSerializer.java   | 2 ++
 .../apache/flink/core/fs/AbstractMultiFSDataInputStream.java| 2 ++
 .../java/org/apache/flink/core/fs/ClosingFSDataInputStream.java | 4 +++-
 .../org/apache/flink/core/fs/ClosingFSDataOutputStream.java | 4 +++-
 .../java/org/apache/flink/core/fs/FSDataInputStreamWrapper.java | 4 +++-
 .../org/apache/flink/core/fs/FSDataOutputStreamWrapper.java | 4 +++-
 .../org/apache/flink/core/fs/SafetyNetWrapperFileSystem.java| 4 +++-
 .../java/org/apache/flink/core/fs/WrappingProxyCloseable.java   | 2 ++
 .../apache/flink/core/memory/ByteArrayOutputStreamWithPos.java  | 2 ++
 .../java/org/apache/flink/util/AbstractCloseableRegistry.java   | 3 +++
 .../src/main/java/org/apache/flink/util/CollectionUtil.java | 3 +++
 flink-core/src/main/java/org/apache/flink/util/FutureUtil.java  | 5 -
 flink-core/src/main/java/org/apache/flink/util/Migration.java   | 3 +++
 .../src/main/java/org/apache/flink/util/WrappingProxy.java  | 3 +++
 .../src/main/java/org/apache/flink/util/WrappingProxyUtil.java  | 5 -
 44 files changed, 104 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/082d40fd/flink-core/src/main/java/org/apache/flink/api/common/ArchivedExecutionConfig.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/api/common/ArchivedExecutionConfig.java
 
b/flink-core/src/main/java/org/apache/flink/api/common/ArchivedExecutionConfig.java
index faf920d..f267e91 100644
--- 
a/flink-core/src/main/java/org/apache/flink/api/common/ArchivedExecutionConfig.java
+++ 
b/flink-core/src/main/java/org/apache/flink/api/common/ArchivedExecutionConfig.java
@@ -17,6 +17,8 @@
  */
 package org.apache.flink.api.common;
 
+import org.apache.flink.annotation.Internal;
+
 import java.io.Serializable;
 import java.util.Collections;
 import java.util.Map;
@@ -26,6 +28,7 @@ import java.util.Map;
  * It can be used to display job information on the web interface
  * without having to keep the classloader around after job completion.
  */
+@Internal
 public 

[15/19] flink git commit: [FLINK-5747] [distributed coordination] Eager scheduling allocates slots and deploys tasks in bulk

2017-02-19 Thread sewen
http://git-wip-us.apache.org/repos/asf/flink/blob/f113d794/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderChangeJobRecoveryTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderChangeJobRecoveryTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderChangeJobRecoveryTest.java
index 8a9a4ce..be26e7b 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderChangeJobRecoveryTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/LeaderChangeJobRecoveryTest.java
@@ -19,16 +19,13 @@
 package org.apache.flink.runtime.leaderelection;
 
 import org.apache.flink.api.common.ExecutionConfig;
-import org.apache.flink.api.common.JobID;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.core.testutils.OneShotLatch;
 import org.apache.flink.runtime.executiongraph.ExecutionGraph;
-import org.apache.flink.runtime.executiongraph.JobStatusListener;
+import org.apache.flink.runtime.executiongraph.TerminalJobStatusListener;
 import org.apache.flink.runtime.instance.ActorGateway;
 import org.apache.flink.runtime.jobgraph.DistributionPattern;
 import org.apache.flink.runtime.jobgraph.JobGraph;
-import org.apache.flink.runtime.jobgraph.JobStatus;
 import org.apache.flink.runtime.jobgraph.JobVertex;
 import org.apache.flink.runtime.jobmanager.Tasks;
 import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
@@ -113,7 +110,7 @@ public class LeaderChangeJobRecoveryTest extends TestLogger 
{
 
ExecutionGraph executionGraph = (ExecutionGraph) 
((TestingJobManagerMessages.ExecutionGraphFound) 
responseExecutionGraph).executionGraph();
 
-   TestJobStatusListener testListener = new 
TestJobStatusListener();
+   TerminalJobStatusListener testListener = new 
TerminalJobStatusListener();
executionGraph.registerJobStatusListener(testListener);
 
cluster.revokeLeadership();
@@ -146,20 +143,4 @@ public class LeaderChangeJobRecoveryTest extends 
TestLogger {
 
return jobGraph;
}
-
-   public static class TestJobStatusListener implements JobStatusListener {
-
-   private final OneShotLatch terminalStateLatch = new 
OneShotLatch();
-
-   public void waitForTerminalState(long timeoutMillis) throws 
InterruptedException, TimeoutException {
-   terminalStateLatch.await(timeoutMillis, 
TimeUnit.MILLISECONDS);
-   }
-
-   @Override
-   public void jobStatusChanges(JobID jobId, JobStatus 
newJobStatus, long timestamp, Throwable error) {
-   if (newJobStatus.isGloballyTerminalState() || 
newJobStatus == JobStatus.SUSPENDED) {
-   terminalStateLatch.trigger();
-   }
-   }
-   }
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/f113d794/flink-runtime/src/test/java/org/apache/flink/runtime/minicluster/MiniClusterITCase.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/minicluster/MiniClusterITCase.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/minicluster/MiniClusterITCase.java
index d9a1896..f656622 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/minicluster/MiniClusterITCase.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/minicluster/MiniClusterITCase.java
@@ -23,6 +23,7 @@ import org.apache.flink.api.common.JobID;
 import org.apache.flink.api.common.restartstrategy.RestartStrategies;
 import org.apache.flink.runtime.jobgraph.JobGraph;
 import org.apache.flink.runtime.jobgraph.JobVertex;
+import org.apache.flink.runtime.jobgraph.ScheduleMode;
 import org.apache.flink.runtime.testtasks.NoOpInvokable;
 import org.apache.flink.util.TestLogger;
 
@@ -46,7 +47,13 @@ public class MiniClusterITCase extends TestLogger {
cfg.setUseSingleRpcService();
 
MiniCluster miniCluster = new MiniCluster(cfg);
-   executeJob(miniCluster);
+   try {
+   miniCluster.start();
+   executeJob(miniCluster);
+   }
+   finally {
+   miniCluster.shutdown();
+   }
}
 
@Test
@@ -55,7 +62,13 @@ public class MiniClusterITCase extends TestLogger {
cfg.setUseRpcServicePerComponent();
 
MiniCluster miniCluster = new MiniCluster(cfg);
-   executeJob(miniCluster);
+   try {
+   miniCluster.start();
+   executeJob(miniCluster);
+   }
+   finally {
+   

[04/19] flink git commit: [FLINK-5129] [distributed runtime] BlobCache directly accesses Blobs from the distributed file system if possible

2017-02-19 Thread sewen
[FLINK-5129] [distributed runtime] BlobCache directly accesses Blobs from the 
distributed file system if possible

This closes #3084
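
In rough terms, the cache now short-circuits to the distributed file system before falling back to a TCP download from the BlobServer. A sketch of that lookup order (method and field names are illustrative, not the exact BlobCache API):

    // Illustrative lookup order: local cache first, then the BlobStore on
    // the distributed file system, then the BlobServer as a last resort.
    File getBlob(BlobKey key) throws IOException {
        File localFile = new File(storageDir, "blob_" + key);
        if (localFile.exists()) {
            return localFile;                       // cached locally already
        }
        try {
            blobStore.get(key, localFile);          // direct read from DFS
        } catch (IOException e) {
            downloadFromBlobServer(key, localFile); // TCP fallback
        }
        return localFile;
    }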


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/9f544d83
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/9f544d83
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/9f544d83

Branch: refs/heads/master
Commit: 9f544d83b3443cf33f5890efdb956678847d445f
Parents: e68ee5c
Author: Nico Kruber 
Authored: Tue Nov 22 12:49:03 2016 +0100
Committer: Stephan Ewen 
Committed: Sat Feb 18 19:19:34 2017 +0100

--
 .../handlers/TaskManagerLogHandler.java |   2 +-
 .../apache/flink/runtime/blob/BlobCache.java| 269 +++
 .../apache/flink/runtime/blob/BlobClient.java   |   3 +-
 .../apache/flink/runtime/blob/BlobServer.java   |  56 +---
 .../runtime/blob/BlobServerConnection.java  |   8 +
 .../apache/flink/runtime/blob/BlobStore.java|  29 +-
 .../apache/flink/runtime/blob/BlobUtils.java|  75 +++---
 .../flink/runtime/blob/FileSystemBlobStore.java |  34 +--
 .../flink/runtime/blob/VoidBlobStore.java   |   9 +-
 .../apache/flink/runtime/client/JobClient.java  |   8 +-
 .../librarycache/BlobLibraryCacheManager.java   |  13 +-
 .../highavailability/ZookeeperHaServices.java   |  20 +-
 .../runtime/taskexecutor/TaskExecutor.java  |  19 +-
 .../runtime/blob/BlobCacheRetriesTest.java  |  86 +-
 .../runtime/blob/BlobCacheSuccessTest.java  |  76 +-
 .../flink/runtime/blob/BlobRecoveryITCase.java  |  31 +--
 .../runtime/blob/BlobServerDeleteTest.java  |  66 ++---
 .../flink/runtime/blob/BlobServerRangeTest.java |   1 +
 .../flink/runtime/blob/BlobUtilsTest.java   |   6 +-
 .../BlobLibraryCacheRecoveryITCase.java |  18 +-
 20 files changed, 498 insertions(+), 331 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/9f544d83/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/TaskManagerLogHandler.java
--
diff --git 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/TaskManagerLogHandler.java
 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/TaskManagerLogHandler.java
index 78c4455..6583d3b 100644
--- 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/TaskManagerLogHandler.java
+++ 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/TaskManagerLogHandler.java
@@ -150,7 +150,7 @@ public class TaskManagerLogHandler extends 
RuntimeMonitorHandlerBase {
scala.concurrent.Future portFuture = 
jobManager.ask(JobManagerMessages.getRequestBlobManagerPort(), timeout);
scala.concurrent.Future cacheFuture = 
portFuture.map(new Mapper() {
@Override
-   public BlobCache apply(Object result) {
+   public BlobCache checkedApply(Object result) 
throws IOException {
Option hostOption = 
jobManager.actor().path().address().host();
String host = hostOption.isDefined() ? 
hostOption.get() : "localhost";
int port = (int) result;

http://git-wip-us.apache.org/repos/asf/flink/blob/9f544d83/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCache.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCache.java 
b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCache.java
index 7ef1f04..2587b15 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCache.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCache.java
@@ -20,12 +20,12 @@ package org.apache.flink.runtime.blob;
 
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
+import org.apache.flink.runtime.highavailability.HighAvailabilityServices;
 import org.apache.flink.util.FileUtils;
-
+import org.apache.flink.util.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.Closeable;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -35,10 +35,17 @@ import java.net.InetSocketAddress;
 import java.net.URL;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import static org.apache.flink.util.Preconditions.checkArgument;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
 /**
- * The BLOB cache implements a local cache for content-addressable BLOBs. 

[05/19] flink git commit: [hotfix] [docs] Updated DC/OS setup instructions.

2017-02-19 Thread sewen
[hotfix] [docs] Updated DC/OS setup instructions.

This closes #3349


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/e68ee5cb
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/e68ee5cb
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/e68ee5cb

Branch: refs/heads/master
Commit: e68ee5cb18ee8ebd50d17604608993790271929c
Parents: 20420fc
Author: Joerg Schad 
Authored: Fri Feb 17 16:10:53 2017 +0100
Committer: Stephan Ewen 
Committed: Sat Feb 18 19:19:34 2017 +0100

--
 docs/setup/mesos.md | 37 ++---
 1 file changed, 10 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/e68ee5cb/docs/setup/mesos.md
--
diff --git a/docs/setup/mesos.md b/docs/setup/mesos.md
index 77267cb..2ccee65 100644
--- a/docs/setup/mesos.md
+++ b/docs/setup/mesos.md
@@ -34,7 +34,7 @@ set up by the application master. The most sophisticated 
component of the Mesos
 implementation is the application master. The application master currently 
hosts
 the following components:
 
-### Mesos Scheduler 
+### Mesos Scheduler
 
 The scheduler is responsible for registering the framework with Mesos,
 requesting resources, and launching worker nodes. The scheduler continuously
@@ -57,7 +57,7 @@ The artifact server is responsible for providing resources to 
the worker
 nodes. The resources can be anything from the Flink binaries to shared secrets
 or configuration files. For instance, in non-containered environments, the
 artifact server will provide the Flink binaries. What files will be served
-depends on the configuration overlay used. 
+depends on the configuration overlay used.
 
 ### Flink's JobManager and Web Interface
 
@@ -87,30 +87,13 @@ If you don't have a running DC/OS cluster, please follow the
 [instructions on how to install DC/OS on the official 
website](https://dcos.io/install/).
 
 Once you have a DC/OS cluster, you may install Flink through the DC/OS
-Universe. In the search prompt, just search for Flink. 
+Universe. In the search prompt, just search for Flink. Alternatively, you can 
use the DC/OS CLI:
 
-**Note**: At the time of this writing, Flink was not yet available in the
-Universe. Please use the following workaround in the meantime:
+dcos package install flink
 
-1. [Install the DC/OS CLI](https://dcos.io/docs/1.8/usage/cli/install/)
+Further information can be found in the
+[DC/OS examples 
documentation](https://github.com/dcos/examples/tree/master/1.8/flink).
 
-2. Add the Development Universe
-
-`./dcos marathon app add 
https://raw.githubusercontent.com/mesosphere/dcos-flink-service/Makman2/quickstart/universe-server.json`
-
-3. Add the local Universe repository:
-
-   `./dcos package repo add --index=0 dev-universe 
http://universe.marathon.mesos:8085/repo`
-
-4. Install Flink through the Universe page or using the `dcos` command:
-   
-   `./dcos package install flink`
-
-In order to execute a Flink job on a DC/OS hosted Flink cluster, you first 
have to find out the address of the launched JobManager.
-The JobManager address can be found out by opening the Flink service, going to 
*Job Manager* and then using the address specified under 
`jobmanager.rpc.address` and `jobmanager.rpc.port`.
-Now you can use this address to submit a job to your cluster via
-
-FLINK_HOME/bin/flink run -m address:port flink-job.jar
 
 ## Mesos without DC/OS
 
@@ -167,7 +150,7 @@ A more convenient and easier to maintain approach is to use 
Docker containers to
 This is controlled via the following configuration entries:
 
 mesos.resourcemanager.tasks.container.type: mesos _or_ docker
-
+
 If set to 'docker', specify the image name:
 
 mesos.resourcemanager.tasks.container.image.name: image_name
@@ -181,7 +164,7 @@ which manage the Flink processes in a Mesos cluster:
 1. `mesos-appmaster.sh`
This starts the Mesos application master which will register the Mesos 
scheduler.
It is also responsible for starting up the worker nodes.
-   
+
 2. `mesos-taskmanager.sh`
The entry point for the Mesos worker processes.
You don't need to explicitly execute this script.
@@ -241,14 +224,14 @@ When running Flink with Marathon, the whole Flink cluster 
including the job mana
 
 `mesos.maximum-failed-tasks`: The maximum number of failed workers before the 
cluster fails (**DEFAULT**: Number of initial workers).
 May be set to -1 to disable this feature.
-
+
 `mesos.master`: The Mesos master URL. The value should be in one of the 
following forms:
 
 * `host:port`
 * `zk://host1:port1,host2:port2,.../path`
 * `zk://username:password@host1:port1,host2:port2,.../path`
 * `file:///path/to/file`
- 
+
 

[16/19] flink git commit: [FLINK-5747] [distributed coordination] Eager scheduling allocates slots and deploys tasks in bulk

2017-02-19 Thread sewen
[FLINK-5747] [distributed coordination] Eager scheduling allocates slots and 
deploys tasks in bulk

That way, strictly topological deployment can be guaranteed.

Also, many quick deploy/not-enough-resources/fail/recover cycles can be
avoided in the cases where resources need some time to appear.

This closes #3295
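
Conceptually, all slot futures are collected up front and deployment starts only once the complete set is available, releasing everything on failure. A compressed sketch of that pattern using plain CompletableFuture (allocateSlot, releaseIfAllocated and deployAll are hypothetical helpers; the actual code uses Flink's own future utilities):

    // Allocate every slot before deploying anything, so deployment is
    // all-or-nothing and can proceed in strict topological order.
    List<CompletableFuture<SimpleSlot>> slotFutures = new ArrayList<>();
    for (ExecutionVertex vertex : verticesInTopologicalOrder) {
        slotFutures.add(allocateSlot(vertex));
    }

    CompletableFuture.allOf(slotFutures.toArray(new CompletableFuture[0]))
        .whenComplete((ignored, failure) -> {
            if (failure != null) {
                slotFutures.forEach(f -> releaseIfAllocated(f)); // roll back
            } else {
                deployAll(slotFutures);  // all resources are present
            }
        });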


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/f113d794
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/f113d794
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/f113d794

Branch: refs/heads/master
Commit: f113d79451ba88c487358861cc3e20aac3d19257
Parents: 5902ea0
Author: Stephan Ewen 
Authored: Fri Feb 3 20:26:23 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:24 2017 +0100

--
 .../org/apache/flink/util/ExceptionUtils.java   |  12 +
 .../apache/flink/util/ExceptionUtilsTest.java   |  60 ++
 .../flink/runtime/concurrent/Executors.java |   3 +-
 .../flink/runtime/concurrent/FutureUtils.java   | 115 
 .../flink/runtime/executiongraph/Execution.java |  73 ++-
 .../executiongraph/ExecutionAndSlot.java|  46 ++
 .../runtime/executiongraph/ExecutionGraph.java  | 170 +-
 .../executiongraph/ExecutionGraphUtils.java | 106 
 .../executiongraph/ExecutionJobVertex.java  |  46 +-
 .../runtime/executiongraph/ExecutionVertex.java |   3 +-
 .../IllegalExecutionStateException.java |  53 ++
 .../apache/flink/runtime/instance/SlotPool.java |   9 +-
 .../runtime/concurrent/FutureUtilsTest.java | 194 ++
 .../ExecutionGraphSchedulingTest.java   | 610 +++
 .../executiongraph/ExecutionGraphUtilsTest.java | 124 
 .../ExecutionVertexCancelTest.java  |   2 +-
 .../ExecutionVertexSchedulingTest.java  |   3 -
 .../executiongraph/PointwisePatternTest.java|  12 +-
 .../executiongraph/ProgrammedSlotProvider.java  |  87 +++
 .../TerminalJobStatusListener.java  |  45 ++
 .../LeaderChangeJobRecoveryTest.java|  23 +-
 .../runtime/minicluster/MiniClusterITCase.java  |  28 +-
 .../Flip6LocalStreamEnvironment.java|   4 +-
 23 files changed, 1735 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/f113d794/flink-core/src/main/java/org/apache/flink/util/ExceptionUtils.java
--
diff --git a/flink-core/src/main/java/org/apache/flink/util/ExceptionUtils.java 
b/flink-core/src/main/java/org/apache/flink/util/ExceptionUtils.java
index 6ba9ef6..69c2692 100644
--- a/flink-core/src/main/java/org/apache/flink/util/ExceptionUtils.java
+++ b/flink-core/src/main/java/org/apache/flink/util/ExceptionUtils.java
@@ -101,6 +101,18 @@ public final class ExceptionUtils {
}
 
/**
+* Rethrows the given {@code Throwable}, if it represents an error that 
is fatal to the JVM.
+* See {@link ExceptionUtils#isJvmFatalError(Throwable)} for a 
definition of fatal errors.
+* 
+* @param t The Throwable to check and rethrow.
+*/
+   public static void rethrowIfFatalError(Throwable t) {
+   if (isJvmFatalError(t)) {
+   throw (Error) t;
+   }
+   }
+
+   /**
 * Adds a new exception as a {@link Throwable#addSuppressed(Throwable) 
suppressed exception}
 * to a prior exception, or returns the new exception, if no prior 
exception exists.
 *

http://git-wip-us.apache.org/repos/asf/flink/blob/f113d794/flink-core/src/test/java/org/apache/flink/util/ExceptionUtilsTest.java
--
diff --git 
a/flink-core/src/test/java/org/apache/flink/util/ExceptionUtilsTest.java 
b/flink-core/src/test/java/org/apache/flink/util/ExceptionUtilsTest.java
new file mode 100644
index 000..343b9d6
--- /dev/null
+++ b/flink-core/src/test/java/org/apache/flink/util/ExceptionUtilsTest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under 

[12/19] flink git commit: [FLINK-5640] [build] Configure the explicit Unit Test file suffix

2017-02-19 Thread sewen
[FLINK-5640] [build] Configure the explicit Unit Test file suffix

This closes #3211


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4ce2557d
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4ce2557d
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4ce2557d

Branch: refs/heads/master
Commit: 4ce2557da623d84f160fa3993b8e27590752fedb
Parents: 1456f0a
Author: shijinkui 
Authored: Fri Feb 17 00:24:20 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:23 2017 +0100

--
 pom.xml | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/4ce2557d/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 5487d70..e6fae81 100644
--- a/pom.xml
+++ b/pom.xml
@@ -965,6 +965,8 @@ under the License.

-Xlint:all


+
+   

org.apache.maven.plugins
maven-surefire-plugin
@@ -980,6 +982,7 @@ under the License.
-Xms256m -Xmx800m 
-Dmvn.forkNumber=${surefire.forkNumber} -XX:-UseGCOverheadLimit


+   

default-test
test
@@ -987,12 +990,16 @@ under the License.
test


+   
+   
**/*Test.*
+   


**/*ITCase.*

${flink-fast-tests-pattern}



+   

integration-tests
integration-test



[07/19] flink git commit: [hotfix] [core] Add missing @PublicEvolving annotations to classes in flink-core.

2017-02-19 Thread sewen
[hotfix] [core] Add missing @PublicEvolving annotations to classes in 
flink-core.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/0aa9918c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/0aa9918c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/0aa9918c

Branch: refs/heads/master
Commit: 0aa9918cc5b56682087c2c19eb31d9b321c97875
Parents: 391efd3
Author: Fabian Hueske 
Authored: Wed Feb 15 15:25:01 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:23 2017 +0100

--
 .../src/main/java/org/apache/flink/api/common/Archiveable.java  | 3 +++
 .../java/org/apache/flink/configuration/SecurityOptions.java| 3 +++
 .../java/org/apache/flink/core/io/VersionMismatchException.java | 5 -
 .../src/main/java/org/apache/flink/core/io/Versioned.java   | 3 +++
 .../org/apache/flink/core/io/VersionedIOReadableWritable.java   | 4 +++-
 .../apache/flink/migration/util/MigrationInstantiationUtil.java | 4 +++-
 .../java/org/apache/flink/migration/util/SerializedValue.java   | 2 ++
 7 files changed, 21 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/0aa9918c/flink-core/src/main/java/org/apache/flink/api/common/Archiveable.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/api/common/Archiveable.java 
b/flink-core/src/main/java/org/apache/flink/api/common/Archiveable.java
index 09a3a0c..69e050d 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/Archiveable.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/Archiveable.java
@@ -17,8 +17,11 @@
  */
 package org.apache.flink.api.common;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import java.io.Serializable;
 
+@PublicEvolving
 public interface Archiveable {
T archive();
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/0aa9918c/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java 
b/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java
index 67d101d..95cf0c7 100644
--- 
a/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java
+++ 
b/flink-core/src/main/java/org/apache/flink/configuration/SecurityOptions.java
@@ -18,11 +18,14 @@
 
 package org.apache.flink.configuration;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import static org.apache.flink.configuration.ConfigOptions.key;
 
 /**
  * The set of configuration options relating to security.
  */
+@PublicEvolving
 public class SecurityOptions {
 
// 


http://git-wip-us.apache.org/repos/asf/flink/blob/0aa9918c/flink-core/src/main/java/org/apache/flink/core/io/VersionMismatchException.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/core/io/VersionMismatchException.java
 
b/flink-core/src/main/java/org/apache/flink/core/io/VersionMismatchException.java
index 3ff88e9..92fd4f4 100644
--- 
a/flink-core/src/main/java/org/apache/flink/core/io/VersionMismatchException.java
+++ 
b/flink-core/src/main/java/org/apache/flink/core/io/VersionMismatchException.java
@@ -18,11 +18,14 @@
 
 package org.apache.flink.core.io;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 import java.io.IOException;
 
 /**
  * This exception signals that incompatible versions have been found during 
serialization.
  */
+@PublicEvolving
 public class VersionMismatchException extends IOException {
 
private static final long serialVersionUID = 7024258967585372438L;
@@ -41,4 +44,4 @@ public class VersionMismatchException extends IOException {
public VersionMismatchException(Throwable cause) {
super(cause);
}
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/0aa9918c/flink-core/src/main/java/org/apache/flink/core/io/Versioned.java
--
diff --git a/flink-core/src/main/java/org/apache/flink/core/io/Versioned.java 
b/flink-core/src/main/java/org/apache/flink/core/io/Versioned.java
index b36d5e8..786bf73 100644
--- a/flink-core/src/main/java/org/apache/flink/core/io/Versioned.java
+++ b/flink-core/src/main/java/org/apache/flink/core/io/Versioned.java
@@ -18,10 +18,13 @@
 
 package org.apache.flink.core.io;
 
+import org.apache.flink.annotation.PublicEvolving;
+
 /**
  * This interface is implemented by classes that provide a version number. 
Versions numbers can be used to 

[13/19] flink git commit: [FLINK-5277] [tests] Add unit tests for ResultPartition#add() in case of failures

2017-02-19 Thread sewen
[FLINK-5277] [tests] Add unit tests for ResultPartition#add() in case of 
failures

This verifies that the given network buffer is recycled as expected and that
no notifiers are called when adding a buffer fails.

This closes #3309


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/1ceb7d82
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/1ceb7d82
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/1ceb7d82

Branch: refs/heads/master
Commit: 1ceb7d82eccf4dc77482bddb61a664fd7f226b2b
Parents: 5e32eb5
Author: Nico Kruber 
Authored: Tue Feb 14 17:42:28 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:23 2017 +0100

--
 .../network/partition/ResultPartitionTest.java  | 75 
 1 file changed, 75 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/1ceb7d82/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
index f6562a1..0cd3591 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionTest.java
@@ -20,13 +20,16 @@ package org.apache.flink.runtime.io.network.partition;
 
 import org.apache.flink.api.common.JobID;
 import org.apache.flink.runtime.io.disk.iomanager.IOManager;
+import org.apache.flink.runtime.io.network.buffer.Buffer;
 import org.apache.flink.runtime.io.network.util.TestBufferFactory;
 import org.apache.flink.runtime.taskmanager.TaskActions;
+import org.junit.Assert;
 import org.junit.Test;
 
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
@@ -70,6 +73,78 @@ public class ResultPartitionTest {
}
}
 
+   @Test
+   public void testAddOnFinishedPipelinedPartition() throws Exception {
+   testAddOnFinishedPartition(ResultPartitionType.PIPELINED);
+   }
+
+   @Test
+   public void testAddOnFinishedBlockingPartition() throws Exception {
+   testAddOnFinishedPartition(ResultPartitionType.BLOCKING);
+   }
+
+   /**
+* Tests {@link ResultPartition#add} on a partition which has already 
finished.
+*
+* @param pipelined the result partition type to set up
+*/
+   protected void testAddOnFinishedPartition(final ResultPartitionType 
pipelined)
+   throws Exception {
+   Buffer buffer = TestBufferFactory.createBuffer();
+   ResultPartitionConsumableNotifier notifier = 
mock(ResultPartitionConsumableNotifier.class);
+   try {
+   ResultPartition partition = createPartition(notifier, 
pipelined, true);
+   partition.finish();
+   reset(notifier);
+   // partition.add() should fail
+   partition.add(buffer, 0);
+   Assert.fail("exception expected");
+   } catch (IllegalStateException e) {
+   // expected => ignored
+   } finally {
+   if (!buffer.isRecycled()) {
+   Assert.fail("buffer not recycled");
+   buffer.recycle();
+   }
+   // should not have notified either
+   verify(notifier, 
never()).notifyPartitionConsumable(any(JobID.class), 
any(ResultPartitionID.class), any(TaskActions.class));
+   }
+   }
+
+   @Test
+   public void testAddOnReleasedPipelinedPartition() throws Exception {
+   testAddOnReleasedPartition(ResultPartitionType.PIPELINED);
+   }
+
+   @Test
+   public void testAddOnReleasedBlockingPartition() throws Exception {
+   testAddOnReleasedPartition(ResultPartitionType.BLOCKING);
+   }
+
+   /**
+* Tests {@link ResultPartition#add} on a partition which has already 
been released.
+*
+* @param pipelined the result partition type to set up
+*/
+   protected void testAddOnReleasedPartition(final ResultPartitionType 
pipelined)
+   throws Exception {
+   Buffer buffer = TestBufferFactory.createBuffer();
+   

[02/19] flink git commit: [FLINK-5522] [storm compatibility] Move Storm LocalCluster based test to a separate class

2017-02-19 Thread sewen
[FLINK-5522] [storm compatibility] Move Storm LocalCluster based test to a 
separate class

This fixes the problem that the Storm LocalCluster cannot run under PowerMock

This closes #3138


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d05fc377
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d05fc377
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d05fc377

Branch: refs/heads/master
Commit: d05fc377ee688b231fb1b0daeb8a34fd054f3ca1
Parents: 5313459
Author: liuyuzhong7 
Authored: Thu Feb 9 16:16:15 2017 +0800
Committer: Stephan Ewen 
Committed: Sat Feb 18 19:19:34 2017 +0100

--
 .../storm/wrappers/WrapperSetupHelperTest.java  | 167 +---
 .../WrapperSetupInLocalClusterTest.java | 190 +++
 2 files changed, 191 insertions(+), 166 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/d05fc377/flink-contrib/flink-storm/src/test/java/org/apache/flink/storm/wrappers/WrapperSetupHelperTest.java
--
diff --git 
a/flink-contrib/flink-storm/src/test/java/org/apache/flink/storm/wrappers/WrapperSetupHelperTest.java
 
b/flink-contrib/flink-storm/src/test/java/org/apache/flink/storm/wrappers/WrapperSetupHelperTest.java
index 5e29ac4..5f38705 100644
--- 
a/flink-contrib/flink-storm/src/test/java/org/apache/flink/storm/wrappers/WrapperSetupHelperTest.java
+++ 
b/flink-contrib/flink-storm/src/test/java/org/apache/flink/storm/wrappers/WrapperSetupHelperTest.java
@@ -17,29 +17,15 @@
 
 package org.apache.flink.storm.wrappers;
 
-import org.apache.storm.Config;
-import org.apache.storm.LocalCluster;
-import org.apache.storm.generated.ComponentCommon;
-import org.apache.storm.generated.StormTopology;
-import org.apache.storm.task.TopologyContext;
+import org.apache.flink.storm.util.AbstractTest;
 import org.apache.storm.topology.IComponent;
 import org.apache.storm.topology.IRichBolt;
 import org.apache.storm.topology.IRichSpout;
-import org.apache.storm.topology.TopologyBuilder;
 import org.apache.storm.tuple.Fields;
 import org.apache.storm.utils.Utils;
-
-import org.apache.flink.storm.api.FlinkTopology;
-import org.apache.flink.storm.util.AbstractTest;
-import org.apache.flink.storm.util.TestDummyBolt;
-import org.apache.flink.storm.util.TestDummySpout;
-import org.apache.flink.storm.util.TestSink;
-import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
-
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
-
 import org.powermock.api.mockito.PowerMockito;
 import org.powermock.core.classloader.annotations.PowerMockIgnore;
 import org.powermock.core.classloader.annotations.PrepareForTest;
@@ -47,14 +33,9 @@ import org.powermock.modules.junit4.PowerMockRunner;
 
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
 
 import static java.util.Collections.singleton;
-
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(WrapperSetupHelper.class)
@@ -150,150 +131,4 @@ public class WrapperSetupHelperTest extends AbstractTest {
boltOrSpout,
numberOfAttributes == -1 ? new 
HashSet(singleton(Utils.DEFAULT_STREAM_ID)) : null));
}
-
-   @Test
-   public void testCreateTopologyContext() {
-   HashMap dops = new HashMap();
-   dops.put("spout1", 1);
-   dops.put("spout2", 3);
-   dops.put("bolt1", 1);
-   dops.put("bolt2", 2);
-   dops.put("sink", 1);
-
-   HashMap taskCounter = new HashMap();
-   taskCounter.put("spout1", 0);
-   taskCounter.put("spout2", 0);
-   taskCounter.put("bolt1", 0);
-   taskCounter.put("bolt2", 0);
-   taskCounter.put("sink", 0);
-
-   HashMap operators = new HashMap();
-   operators.put("spout1", new TestDummySpout());
-   operators.put("spout2", new TestDummySpout());
-   operators.put("bolt1", new TestDummyBolt());
-   operators.put("bolt2", new TestDummyBolt());
-   operators.put("sink", new TestSink());
-
-   TopologyBuilder builder = new TopologyBuilder();
-
-   builder.setSpout("spout1", (IRichSpout) 
operators.get("spout1"), dops.get("spout1"));
-   builder.setSpout("spout2", (IRichSpout) 
operators.get("spout2"), dops.get("spout2"));
-   builder.setBolt("bolt1", (IRichBolt) 

[08/19] flink git commit: [FLINK-5739] [client] Fix NullPointerException in CliFrontend

2017-02-19 Thread sewen
[FLINK-5739] [client] Fix NullPointerException in CliFrontend

This closes #3292
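
The null result occurs when the packaged program's main() never triggers an execution; a minimal program that reproduces it (the output path is made up for illustration):

    // Defines a sink but never submits the job, so no JobSubmissionResult
    // is produced -- previously this led to the NPE in CliFrontend.
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.fromElements(1, 2, 3).writeAsText("/tmp/out");
    // env.execute("example");  // the missing call the new messages hint at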


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/5e32eb54
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/5e32eb54
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/5e32eb54

Branch: refs/heads/master
Commit: 5e32eb549d3bc2195548620005fcf54437e75f48
Parents: 3104619
Author: Zhuoluo Yang 
Authored: Tue Feb 14 09:54:50 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:23 2017 +0100

--
 .../src/main/java/org/apache/flink/client/CliFrontend.java | 6 ++
 .../flink/optimizer/plantranslate/JobGraphGenerator.java   | 3 ++-
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/5e32eb54/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
--
diff --git 
a/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java 
b/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
index b6543a3..8c84c5a 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
@@ -842,6 +842,12 @@ public class CliFrontend {
program.deleteExtractedLibraries();
}
 
+   if (null == result) {
+   logAndSysout("No JobSubmissionResult returned, please 
make sure you called " +
+   "ExecutionEnvironment.execute()");
+   return 1;
+   }
+
if (result.isJobExecutionResult()) {
logAndSysout("Program execution finished");
JobExecutionResult execResult = 
result.getJobExecutionResult();

http://git-wip-us.apache.org/repos/asf/flink/blob/5e32eb54/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
--
diff --git 
a/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
 
b/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
index 4ccfae3..6f7b04a 100644
--- 
a/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
+++ 
b/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
@@ -172,7 +172,8 @@ public class JobGraphGenerator implements Visitor 
{

public JobGraph compileJobGraph(OptimizedPlan program, JobID jobId) {
if (program == null) {
-   throw new NullPointerException();
+			throw new NullPointerException("Program is null, did you call " +
+   "ExecutionEnvironment.execute()");
}

if (jobId == null) {



[01/19] flink git commit: [FLINK-5817] [tests] Use TemporaryFolder to create temp files and folders for tests

2017-02-19 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 70475b367 -> f113d7945


[FLINK-5817] [tests] Use TemporaryFolder to create temp files and folders for tests

This closes #3341


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/709fa1d9
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/709fa1d9
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/709fa1d9

Branch: refs/heads/master
Commit: 709fa1d95b7dbbcfdd1124de7d6e073834ca75cf
Parents: d05fc37
Author: wenlong.lwl 
Authored: Fri Feb 17 17:14:54 2017 +0800
Committer: Stephan Ewen 
Committed: Sat Feb 18 19:19:34 2017 +0100

--
 .../FileCacheDeleteValidationTest.java  | 12 +++--
 .../io/disk/iomanager/IOManagerTest.java| 11 ++--
 .../flink/test/util/AbstractTestBase.java   | 53 ++--
 3 files changed, 31 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/709fa1d9/flink-runtime/src/test/java/org/apache/flink/runtime/filecache/FileCacheDeleteValidationTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/filecache/FileCacheDeleteValidationTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/filecache/FileCacheDeleteValidationTest.java
index 4db0d93..4dca3db 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/filecache/FileCacheDeleteValidationTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/filecache/FileCacheDeleteValidationTest.java
@@ -19,6 +19,7 @@
 package org.apache.flink.runtime.filecache;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.concurrent.Future;
 
 import org.apache.flink.core.fs.Path;
@@ -27,10 +28,12 @@ import org.apache.flink.api.common.JobID;
 
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 
 import com.google.common.base.Charsets;
 import com.google.common.io.Files;
+import org.junit.rules.TemporaryFolder;
 
 import static org.junit.Assert.fail;
 import static org.junit.Assert.assertTrue;
@@ -56,12 +59,15 @@ public class FileCacheDeleteValidationTest {
+ "Da flammt ein blitzendes Verheeren Dem Pfade vor des 
Donnerschlags. Doch\n"
+ "deine Boten, Herr, verehren Das sanfte Wandeln deines 
Tags.\n";
 
+   @Rule
+   public final TemporaryFolder temporaryFolder = new TemporaryFolder();
+
private FileCache fileCache;
private File f;

@Before
-   public void setup() {
-   String[] tmpDirectories = 
System.getProperty("java.io.tmpdir").split(",|" + File.pathSeparator);
+   public void setup() throws IOException {
+   String[] tmpDirectories = new 
String[]{temporaryFolder.newFolder().getAbsolutePath()};
try {
fileCache = new FileCache(tmpDirectories);
}
@@ -70,7 +76,7 @@ public class FileCacheDeleteValidationTest {
fail("Cannot create FileCache: " + e.getMessage());
}

-   f = new File(System.getProperty("java.io.tmpdir"), "cacheFile");
+   f = temporaryFolder.newFile("cacheFile");
try {
Files.write(testFileContent, f, Charsets.UTF_8);
}

http://git-wip-us.apache.org/repos/asf/flink/blob/709fa1d9/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/iomanager/IOManagerTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/iomanager/IOManagerTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/iomanager/IOManagerTest.java
index 39cb8ee..156098e 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/iomanager/IOManagerTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/io/disk/iomanager/IOManagerTest.java
@@ -21,7 +21,9 @@ package org.apache.flink.runtime.io.disk.iomanager;
 import org.apache.flink.core.memory.MemorySegment;
 import org.apache.flink.runtime.io.disk.iomanager.FileIOChannel.ID;
 import org.apache.flink.runtime.io.network.buffer.Buffer;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,12 +36,15 @@ import static org.junit.Assert.assertTrue;
 
 public class IOManagerTest {
 
+   @Rule
+   public final TemporaryFolder  temporaryFolder = new TemporaryFolder();
+
@Test
-   public void channelEnumerator() {
+   public void channelEnumerator() throws IOException {
IOManager ioMan = null;
 
  

[06/19] flink git commit: [FLINK-5828] [distributed runtime] Fix initialization of Blob storage directories

2017-02-19 Thread sewen
[FLINK-5828] [distributed runtime] Fix initialization of Blob storage 
directories

Flip the logic (try to create the directory first, then check for its
existence) to resolve a concurrency problem

This closes #3342
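
The problem with the old order: two threads can both observe exists() == false, both call mkdirs(), and the loser sees false and throws although the directory now exists. Trying mkdirs() first makes a concurrent creation benign; condensed from the diff below:

    File cacheDirectory = new File(storageDir, "cache");

    // Before (racy): another thread may create the directory between the
    // exists() check and mkdirs(), making mkdirs() return false.
    //   if (!cacheDirectory.exists() && !cacheDirectory.mkdirs()) { throw ... }

    // After: attempt creation first; a false return is fine as long as the
    // directory exists afterwards, no matter which thread created it.
    if (!cacheDirectory.mkdirs() && !cacheDirectory.exists()) {
        throw new RuntimeException("Could not create cache directory");
    }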


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/20420fc6
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/20420fc6
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/20420fc6

Branch: refs/heads/master
Commit: 20420fc6ee153c7171265dda7bf7d593c17fb375
Parents: 70475b3
Author: 士远 
Authored: Fri Feb 17 17:42:22 2017 +0800
Committer: Stephan Ewen 
Committed: Sat Feb 18 19:19:34 2017 +0100

--
 .../src/main/java/org/apache/flink/runtime/blob/BlobUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/20420fc6/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java 
b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
index 136df09..aeaa602 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
@@ -124,7 +124,7 @@ public class BlobUtils {
private static File getCacheDirectory(File storageDir) {
final File cacheDirectory = new File(storageDir, "cache");
 
-   if (!cacheDirectory.exists() && !cacheDirectory.mkdirs()) {
+   if (!cacheDirectory.mkdirs() && !cacheDirectory.exists()) {
throw new RuntimeException("Could not create cache 
directory '" + cacheDirectory.getAbsolutePath() + "'.");
}
 



[14/19] flink git commit: [FLINK-5812] [core] Cleanups in FileSystem (round 1)

2017-02-19 Thread sewen
[FLINK-5812] [core] Cleanups in FileSystem (round 1)

  - This makes the FileSystem use the 'WriteMode' (otherwise it was an unused 
enumeration); see the sketch after this list
  - Extends comments
  - Deprecate the method that controls the replication factor and block size
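
On the first point, the sketch below shows overwrite behavior being requested through the enumeration rather than a boolean flag (a minimal example, assuming the existing WriteMode constants OVERWRITE and NO_OVERWRITE):

    // Create an output stream, replacing any existing file at the path.
    FileSystem fs = path.getFileSystem();
    FSDataOutputStream out = fs.create(path, FileSystem.WriteMode.OVERWRITE);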


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/a1bfae95
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/a1bfae95
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/a1bfae95

Branch: refs/heads/master
Commit: a1bfae95fec8d076ef90d5a36ffa32d3870870d8
Parents: 31c26e3
Author: Stephan Ewen 
Authored: Wed Feb 15 17:10:53 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:24 2017 +0100

--
 .../flink/api/common/io/FileOutputFormat.java   |  4 +-
 .../org/apache/flink/core/fs/FileSystem.java| 82 +---
 .../core/fs/SafetyNetWrapperFileSystem.java |  6 +-
 .../flink/core/fs/local/LocalFileSystem.java| 26 ---
 .../flink/util/AbstractCloseableRegistry.java   | 15 ++--
 .../flink/runtime/fs/hdfs/HadoopFileSystem.java |  4 +-
 .../flink/runtime/fs/maprfs/MapRFileSystem.java |  4 +-
 7 files changed, 104 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/a1bfae95/flink-core/src/main/java/org/apache/flink/api/common/io/FileOutputFormat.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/api/common/io/FileOutputFormat.java 
b/flink-core/src/main/java/org/apache/flink/api/common/io/FileOutputFormat.java
index 0ab12df..1382f06 100644
--- 
a/flink-core/src/main/java/org/apache/flink/api/common/io/FileOutputFormat.java
+++ 
b/flink-core/src/main/java/org/apache/flink/api/common/io/FileOutputFormat.java
@@ -104,7 +104,7 @@ public abstract class FileOutputFormat extends 
RichOutputFormat implemen
protected Path outputFilePath;

/**
-* The write mode of the output.
+* The write mode of the output.
 */
private WriteMode writeMode;

@@ -249,7 +249,7 @@ public abstract class FileOutputFormat extends 
RichOutputFormat implemen
this.actualFilePath = (numTasks > 1 || outputDirectoryMode == 
OutputDirectoryMode.ALWAYS) ? p.suffix("/" + getDirectoryFileName(taskNumber)) 
: p;
 
// create output file
-   this.stream = fs.create(this.actualFilePath, writeMode == 
WriteMode.OVERWRITE);
+   this.stream = fs.create(this.actualFilePath, writeMode);

// at this point, the file creation must have succeeded, or an 
exception has been thrown
this.fileCreated = true;
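
From user code, the enum-based call site would look roughly like the following
sketch; it assumes the create(Path, WriteMode) overload shown in this diff, and
the output path is made up:

import org.apache.flink.core.fs.FSDataOutputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.FileSystem.WriteMode;
import org.apache.flink.core.fs.Path;

public class WriteModeExample {

    public static void main(String[] args) throws Exception {
        Path out = new Path("file:///tmp/flink-out/result.txt");
        FileSystem fs = out.getFileSystem();

        // OVERWRITE replaces an existing file; NO_OVERWRITE fails instead
        try (FSDataOutputStream stream = fs.create(out, WriteMode.OVERWRITE)) {
            stream.write("hello".getBytes());
        }
    }
}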

http://git-wip-us.apache.org/repos/asf/flink/blob/a1bfae95/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
--
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java 
b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
index c3828fb..4149d5e 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
@@ -33,6 +33,7 @@ import org.apache.flink.core.fs.local.LocalFileSystem;
 import org.apache.flink.util.IOUtils;
 import org.apache.flink.util.OperatingSystem;
 import org.apache.flink.util.Preconditions;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,7 +60,20 @@ import static 
org.apache.flink.util.Preconditions.checkNotNull;
  * machine-local file system). Other file system types are accessed by an 
implementation that bridges
  * to the suite of file systems supported by Hadoop (such as for example HDFS).
  * 
- * Data Persistence
+ * Scope and Purpose
+ * 
+ * The purpose of this abstraction is to expose a common and well defined interface for
+ * access to files. This abstraction is used both by Flink's fault tolerance 
mechanism (storing
+ * state and recovery data) and by reusable built-in connectors (file sources 
/ sinks).
+ * 
+ * The purpose of this abstraction is not to give user programs an 
abstraction with
+ * extreme flexibility and control across all possible file systems. That 
mission would be a folly,
+ * as the differences in characteristics of even the most common file systems 
are already quite
+ * large. It is expected that user programs that need specialized 
functionality of certain file systems
+ * in their functions, operations, sources, or sinks instantiate the 
specialized file system adapters
+ * directly.
+ * 
+ * Data Persistence Contract
  * 
  * The FileSystem's {@link FSDataOutputStream output streams} are used to 
persistently store data,
  * both for results of streaming applications and 

[19/19] flink git commit: [FLINK-5812] [core] Cleanups in FileSystem (round 2)

2017-02-19 Thread sewen
[FLINK-5812] [core] Cleanups in FileSystem (round 2)

Move the FileSystem safety net to a separate class.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/5902ea0e
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/5902ea0e
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/5902ea0e

Branch: refs/heads/master
Commit: 5902ea0e88c70f330c23b9ace94033ae34c84445
Parents: a1bfae9
Author: Stephan Ewen 
Authored: Wed Feb 15 17:58:37 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:24 2017 +0100

--
 flink-core/pom.xml  |   2 +-
 .../org/apache/flink/core/fs/FileSystem.java|  52 +---
 .../flink/core/fs/FileSystemSafetyNet.java  | 124 +++
 .../flink/util/AbstractCloseableRegistry.java   |   4 -
 .../core/fs/SafetyNetCloseableRegistryTest.java |   8 +-
 .../apache/flink/runtime/taskmanager/Task.java  |  11 +-
 6 files changed, 140 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/5902ea0e/flink-core/pom.xml
--
diff --git a/flink-core/pom.xml b/flink-core/pom.xml
index e9738a2..0a0d06e 100644
--- a/flink-core/pom.xml
+++ b/flink-core/pom.xml
@@ -154,7 +154,7 @@ under the License.



org.apache.flink.api.common.ExecutionConfig#CONFIG_KEY
-   
org.apache.flink.core.fs.FileSystem$FSKey
+   
org.apache.flink.core.fs.FileSystem\$FSKey

org.apache.flink.api.java.typeutils.WritableTypeInfo


http://git-wip-us.apache.org/repos/asf/flink/blob/5902ea0e/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
--
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java 
b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
index 4149d5e..fab0f4d 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java
@@ -17,7 +17,7 @@
  */
 
 
-/**
+/*
  * This file is based on source code from the Hadoop Project 
(http://hadoop.apache.org/), licensed by the Apache
  * Software Foundation (ASF) under the Apache License, Version 2.0. See the 
NOTICE file distributed with this work for
  * additional information regarding copyright ownership.
@@ -30,12 +30,7 @@ import org.apache.flink.annotation.Public;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.core.fs.local.LocalFileSystem;
-import org.apache.flink.util.IOUtils;
 import org.apache.flink.util.OperatingSystem;
-import org.apache.flink.util.Preconditions;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
 import java.io.File;
@@ -174,6 +169,9 @@ import static 
org.apache.flink.util.Preconditions.checkNotNull;
  * application task finishes (or is canceled or failed). That way, the task's 
threads do not
  * leak connections.
  * 
+ * Internal runtime code can explicitly obtain a FileSystem that does not 
use the safety
+ * net via {@link FileSystem#getUnguardedFileSystem(URI)}.
+ * 
  * @see FSDataInputStream
  * @see FSDataOutputStream
  */
@@ -198,57 +196,18 @@ public abstract class FileSystem {
 
// 

 
-   private static final ThreadLocal REGISTRIES 
= new ThreadLocal<>();
-
private static final String HADOOP_WRAPPER_FILESYSTEM_CLASS = 
"org.apache.flink.runtime.fs.hdfs.HadoopFileSystem";
 
private static final String MAPR_FILESYSTEM_CLASS = 
"org.apache.flink.runtime.fs.maprfs.MapRFileSystem";
 
private static final String HADOOP_WRAPPER_SCHEME = "hdwrapper";
 
-   private static final Logger LOG = 
LoggerFactory.getLogger(FileSystem.class);
-
/** This lock guards the methods {@link #initOutPathLocalFS(Path, 
WriteMode, boolean)} and
 * {@link #initOutPathDistFS(Path, WriteMode, boolean)} which are 
otherwise susceptible to races */
private static final ReentrantLock OUTPUT_DIRECTORY_INIT_LOCK = new 
ReentrantLock(true);
 
// 

 
-   /**
-* Create a SafetyNetCloseableRegistry for a Task. This method should 
be called at the beginning of the task's
-
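
A small usage sketch for the unguarded access path mentioned in the new javadoc
(the URI is illustrative; this is not part of the patch):

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

import java.net.URI;

public class UnguardedAccess {

    public static void main(String[] args) throws Exception {
        URI uri = URI.create("file:///tmp");

        // Bypasses the per-task FileSystemSafetyNet: streams opened on this
        // instance are not tracked and not auto-closed when a task finishes.
        FileSystem fs = FileSystem.getUnguardedFileSystem(uri);

        System.out.println(fs.getFileStatus(new Path(uri)).isDir());
    }
}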

[09/19] flink git commit: [FLINK-5817] [test] (followup) Fix temporary folder and temp file path generation

2017-02-19 Thread sewen
[FLINK-5817] [test] (followup) Fix temporary folder and temp file path 
generation

This makes sure the TemporaryFolder rule is already evaluated by the time
the temp files are generated.

This also injects a random parent directory to ensure that even for fixed
directory/file names, the absolute path is randomized.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/1456f0a7
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/1456f0a7
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/1456f0a7

Branch: refs/heads/master
Commit: 1456f0a7084f45056ea9b09e3f85b1aae6b11c6e
Parents: 709fa1d
Author: Stephan Ewen 
Authored: Sun Feb 19 16:21:07 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:23 2017 +0100

--
 .../org/apache/flink/test/util/AbstractTestBase.java   | 13 ++---
 .../java/org/apache/flink/test/util/TestBaseUtils.java |  4 
 2 files changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/1456f0a7/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/AbstractTestBase.java
--
diff --git 
a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/AbstractTestBase.java
 
b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/AbstractTestBase.java
index 544d473..be9b0b7 100644
--- 
a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/AbstractTestBase.java
+++ 
b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/AbstractTestBase.java
@@ -24,7 +24,7 @@ import com.google.common.io.Files;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster;
 
-import org.junit.Rule;
+import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 
 import scala.concurrent.duration.FiniteDuration;
@@ -49,8 +49,8 @@ public abstract class AbstractTestBase extends TestBaseUtils {
 
protected int numTaskManagers = 1;
 
-   @Rule
-   public final TemporaryFolder temporaryFolder = new TemporaryFolder();
+   @ClassRule
+   public static final TemporaryFolder temporaryFolder = new 
TemporaryFolder();
 
 
/** The mini cluster that runs the test programs */
@@ -106,12 +106,12 @@ public abstract class AbstractTestBase extends 
TestBaseUtils {
// 

 
public String getTempDirPath(String dirName) throws IOException {
-   File f = temporaryFolder.newFolder(dirName);
+   File f = createAndRegisterTempFile(dirName);
return f.toURI().toString();
}
 
public String getTempFilePath(String fileName) throws IOException {
-   File f = temporaryFolder.newFile(fileName);
+   File f = createAndRegisterTempFile(fileName);
return f.toURI().toString();
}
 
@@ -122,7 +122,6 @@ public abstract class AbstractTestBase extends 
TestBaseUtils {
}
 
public File createAndRegisterTempFile(String fileName) throws 
IOException {
-   return temporaryFolder.newFile(fileName);
+   return new File(temporaryFolder.newFolder(), fileName);
}
-
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/1456f0a7/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/TestBaseUtils.java
--
diff --git 
a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/TestBaseUtils.java
 
b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/TestBaseUtils.java
index 8431226..cc7c0e2 100644
--- 
a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/TestBaseUtils.java
+++ 
b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/test/util/TestBaseUtils.java
@@ -74,6 +74,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static org.apache.flink.util.Preconditions.checkArgument;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -283,6 +285,8 @@ public class TestBaseUtils extends TestLogger {
String resultPath,
String[] excludePrefixes,
boolean inOrderOfFiles) throws IOException {
+   
+   checkArgument(resultPath != null, "resultPath cannot be null");
 
final 
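
The pattern behind the fix, as a self-contained JUnit sketch (test class and
helper names are hypothetical): the @ClassRule is evaluated before any test code
runs, and the extra newFolder() parent randomizes the absolute path even for a
fixed file name.

import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;

import static org.junit.Assert.assertNotEquals;

public class TempPathTest {

    @ClassRule
    public static final TemporaryFolder TEMP_FOLDER = new TemporaryFolder();

    // mirrors createAndRegisterTempFile(): a fresh random parent per call
    private static File createTempFile(String fileName) throws IOException {
        return new File(TEMP_FOLDER.newFolder(), fileName);
    }

    @Test
    public void absolutePathsAreRandomized() throws IOException {
        File first = createTempFile("result.txt");
        File second = createTempFile("result.txt");

        // same fixed name, but different random parent directories
        assertNotEquals(first.getAbsolutePath(), second.getAbsolutePath());
    }
}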

[11/19] flink git commit: [FLINK-5669] [contrib] Change DataStreamUtils to use the loopback address (127.0.0.1) with local environments.

2017-02-19 Thread sewen
[FLINK-5669] [contrib] Change DataStreamUtils to use the loopback address 
(127.0.0.1) with local environments.

Using loopback rather than the "local address" allows tests to run in
situations where the local machine's hostname may not be resolvable in DNS
(because DNS is unreachable or the hostname is not found) or the hostname does
resolve, but not to an IP address that is reachable.

This closes #3223


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/31046192
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/31046192
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/31046192

Branch: refs/heads/master
Commit: 3104619250fa0e0e87b4bb3e05b1cce9d39e6983
Parents: 4ce2557
Author: Rick Cox 
Authored: Thu Jan 26 14:55:23 2017 -0800
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:23 2017 +0100

--
 .../java/org/apache/flink/contrib/streaming/DataStreamUtils.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/31046192/flink-contrib/flink-streaming-contrib/src/main/java/org/apache/flink/contrib/streaming/DataStreamUtils.java
--
diff --git 
a/flink-contrib/flink-streaming-contrib/src/main/java/org/apache/flink/contrib/streaming/DataStreamUtils.java
 
b/flink-contrib/flink-streaming-contrib/src/main/java/org/apache/flink/contrib/streaming/DataStreamUtils.java
index d4ef9ee..2987597 100644
--- 
a/flink-contrib/flink-streaming-contrib/src/main/java/org/apache/flink/contrib/streaming/DataStreamUtils.java
+++ 
b/flink-contrib/flink-streaming-contrib/src/main/java/org/apache/flink/contrib/streaming/DataStreamUtils.java
@@ -21,6 +21,7 @@ import org.apache.flink.api.common.typeutils.TypeSerializer;
 import org.apache.flink.runtime.net.ConnectionUtils;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.environment.LocalStreamEnvironment;
 import org.apache.flink.streaming.api.environment.RemoteStreamEnvironment;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 
@@ -57,6 +58,8 @@ public final class DataStreamUtils {
throw new IOException("Could not determine a suitable network address to " +
"receive back data from the 
streaming program.", e);
}
+   } else if (env instanceof LocalStreamEnvironment) {
+   clientAddress = InetAddress.getLoopbackAddress();
} else {
try {
clientAddress = InetAddress.getLocalHost();
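
Typical usage, as a hedged sketch: with a local environment,
DataStreamUtils.collect() now reports the loopback address as the client address,
so the following runs even when the machine's hostname does not resolve:

import org.apache.flink.contrib.streaming.DataStreamUtils;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.util.Iterator;

public class CollectLocally {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironment();

        DataStream<Long> numbers = env.generateSequence(1, 10);

        // results are streamed back to this JVM via 127.0.0.1
        Iterator<Long> results = DataStreamUtils.collect(numbers);
        while (results.hasNext()) {
            System.out.println(results.next());
        }
    }
}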



[18/19] flink git commit: [hotfix] [tests] Use random actor names in JobManagerHARecoveryTest to avoid name collisions

2017-02-19 Thread sewen
[hotfix] [tests] Use random actor names in JobManagerHARecoveryTest to avoid 
name collisions


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/31c26e3f
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/31c26e3f
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/31c26e3f

Branch: refs/heads/master
Commit: 31c26e3fc06ad1939284249547c3885a5b62a8f3
Parents: 082d40f
Author: Stephan Ewen 
Authored: Fri Feb 17 20:01:25 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 01:01:24 2017 +0100

--
 .../flink/runtime/jobmanager/JobManagerHARecoveryTest.java   | 8 +++-
 1 file changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/31c26e3f/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerHARecoveryTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerHARecoveryTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerHARecoveryTest.java
index 8985a34..5f2edac 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerHARecoveryTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerHARecoveryTest.java
@@ -169,9 +169,7 @@ public class JobManagerHARecoveryTest {
InstanceManager instanceManager = new InstanceManager();
instanceManager.addInstanceListener(scheduler);
 
-   archive = system.actorOf(Props.create(
-   MemoryArchivist.class,
-   10), "archive");
+   archive = 
system.actorOf(Props.create(MemoryArchivist.class, 10));
 
Props jobManagerProps = Props.create(
TestingJobManager.class,
@@ -190,7 +188,7 @@ public class JobManagerHARecoveryTest {
jobRecoveryTimeout,
Option.apply(null));
 
-   jobManager = system.actorOf(jobManagerProps, 
"jobmanager");
+   jobManager = system.actorOf(jobManagerProps);
ActorGateway gateway = new AkkaActorGateway(jobManager, 
leaderSessionID);
 
taskManager = 
TaskManager.startTaskManagerComponentsAndActor(
@@ -360,7 +358,7 @@ public class JobManagerHARecoveryTest {
Option.apply(null),

recoveredJobs).withDispatcher(CallingThreadDispatcher.Id());
 
-   jobManager = system.actorOf(jobManagerProps, 
"jobmanager");
+   jobManager = system.actorOf(jobManagerProps);
 
Future started = Patterns.ask(jobManager, new 
Identify(42), deadline.timeLeft().toMillis());
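
For illustration only, a hedged sketch of why dropping the explicit names avoids
collisions: Akka assigns a unique generated name when none is given, while
creating two actors under the same explicit name in one ActorSystem fails with an
InvalidActorNameException. The dummy actor below is not part of the patch:

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.UntypedActor;

public class ActorNames {

    public static class Noop extends UntypedActor {
        @Override
        public void onReceive(Object message) {
            // ignore everything
        }
    }

    public static void main(String[] args) {
        ActorSystem system = ActorSystem.create("test");

        // unique generated names like "$a", "$b", so no collision is possible
        ActorRef first = system.actorOf(Props.create(Noop.class));
        ActorRef second = system.actorOf(Props.create(Noop.class));
        System.out.println(first.path() + " / " + second.path());

        // system.actorOf(props, "jobmanager") twice in the same system
        // would throw an InvalidActorNameException instead

        system.shutdown();
    }
}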
 



[1/2] flink git commit: [hotfix] [docs] Updated DC/OS setup instructions.

2017-02-19 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.2 b21f9d11d -> fee020c42


[hotfix] [docs] Updated DC/OS setup instructions.

This closes #3349


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/fee020c4
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/fee020c4
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/fee020c4

Branch: refs/heads/release-1.2
Commit: fee020c42849c6d395fd4c72941fe4bcfff08094
Parents: 8a5d56d
Author: Joerg Schad 
Authored: Fri Feb 17 16:10:53 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Feb 20 02:07:54 2017 +0100

--
 docs/setup/mesos.md | 37 ++---
 1 file changed, 10 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/fee020c4/docs/setup/mesos.md
--
diff --git a/docs/setup/mesos.md b/docs/setup/mesos.md
index 032477a..450185d 100644
--- a/docs/setup/mesos.md
+++ b/docs/setup/mesos.md
@@ -34,7 +34,7 @@ set up by the application master. The most sophisticated 
component of the Mesos
 implementation is the application master. The application master currently 
hosts
 the following components:
 
-### Mesos Scheduler 
+### Mesos Scheduler
 
 The scheduler is responsible for registering the framework with Mesos,
 requesting resources, and launching worker nodes. The scheduler continuously
@@ -57,7 +57,7 @@ The artifact server is responsible for providing resources to 
the worker
 nodes. The resources can be anything from the Flink binaries to shared secrets
 or configuration files. For instance, in non-containered environments, the
 artifact server will provide the Flink binaries. What files will be served
-depends on the configuration overlay used. 
+depends on the configuration overlay used.
 
 ### Flink's JobManager and Web Interface
 
@@ -87,30 +87,13 @@ If you don't have a running DC/OS cluster, please follow the
 [instructions on how to install DC/OS on the official 
website](https://dcos.io/install/).
 
 Once you have a DC/OS cluster, you may install Flink through the DC/OS
-Universe. In the search prompt, just search for Flink. 
+Universe. In the search prompt, just search for Flink. Alternatively, you can 
use the DC/OS CLI:
 
-**Note**: At the time of this writing, Flink was not yet available in the
-Universe. Please use the following workaround in the meantime:
+dcos package install flink
 
-1. [Install the DC/OS CLI](https://dcos.io/docs/1.8/usage/cli/install/)
+Further information can be found in the
+[DC/OS examples 
documentation](https://github.com/dcos/examples/tree/master/1.8/flink).
 
-2. Add the Development Universe
-
-`./dcos marathon app add 
https://raw.githubusercontent.com/mesosphere/dcos-flink-service/Makman2/quickstart/universe-server.json`
-
-3. Add the local Universe repository:
-
-   `./dcos package repo add --index=0 dev-universe 
http://universe.marathon.mesos:8085/repo`
-
-4. Install Flink through the Universe page or using the `dcos` command:
-   
-   `./dcos package install flink`
-
-In order to execute a Flink job on a DC/OS hosted Flink cluster, you first 
have to find out the address of the launched JobManager.
-The JobManager address can be found out by opening the Flink service, going to 
*Job Manager* and then using the address specified under 
`jobmanager.rpc.address` and `jobmanager.rpc.port`.
-Now you can use this address to submit a job to your cluster via
-
-FLINK_HOME/bin/flink run -m address:port flink-job.jar
 
 ## Mesos without DC/OS
 
@@ -167,7 +150,7 @@ A more convenient and easier to maintain approach is to use 
Docker containers to
 This is controlled via the following configuration entries:
 
 mesos.resourcemanager.tasks.container.type: mesos _or_ docker
-
+
 If set to 'docker', specify the image name:
 
 mesos.resourcemanager.tasks.container.image.name: image_name
@@ -181,7 +164,7 @@ which manage the Flink processes in a Mesos cluster:
 1. `mesos-appmaster.sh`
This starts the Mesos application master which will register the Mesos 
scheduler.
It is also responsible for starting up the worker nodes.
-   
+
 2. `mesos-taskmanager.sh`
The entry point for the Mesos worker processes.
You don't need to explicitly execute this script.
@@ -241,14 +224,14 @@ When running Flink with Marathon, the whole Flink cluster 
including the job mana
 
 `mesos.maximum-failed-tasks`: The maximum number of failed workers before the 
cluster fails (**DEFAULT**: Number of initial workers).
 May be set to -1 to disable this feature.
-
+
 `mesos.master`: The Mesos master URL. The value should be in one of the 
following forms:
 
 * `host:port`
 * `zk://host1:port1,host2:port2,.../path`
 * 

[2/2] flink git commit: [FLINK-5828] [distributed runtime] Fix initialization of Blob storage directories

2017-02-19 Thread sewen
[FLINK-5828] [distributed runtime] Fix initialization of Blob storage 
directories

Flip the logic (check existence and create directory) to resolve a concurrency
problem

This closes #3342


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8a5d56d4
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8a5d56d4
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8a5d56d4

Branch: refs/heads/release-1.2
Commit: 8a5d56d448db752c9779a32d5a6f907b0232b489
Parents: b21f9d1
Author: 士远 
Authored: Fri Feb 17 17:42:22 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Feb 20 02:07:54 2017 +0100

--
 .../src/main/java/org/apache/flink/runtime/blob/BlobUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/8a5d56d4/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java 
b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
index e74fa6f..a8cee2e 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
@@ -124,7 +124,7 @@ public class BlobUtils {
private static File getCacheDirectory(File storageDir) {
final File cacheDirectory = new File(storageDir, "cache");
 
-   if (!cacheDirectory.exists() && !cacheDirectory.mkdirs()) {
+   if (!cacheDirectory.mkdirs() && !cacheDirectory.exists()) {
throw new RuntimeException("Could not create cache 
directory '" + cacheDirectory.getAbsolutePath() + "'.");
}
 



[1/2] flink git commit: [FLINK-5631] [yarn] Support downloading additional jars from non-HDFS paths.

2017-02-13 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 6bc6b225e -> 186b12309


[FLINK-5631] [yarn] Support downloading additional jars from non-HDFS paths.

This closes #3202


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/186b1230
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/186b1230
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/186b1230

Branch: refs/heads/master
Commit: 186b12309b540f82a055be28f3f005dce4b8cf46
Parents: 30c5b77
Author: Haohui Mai 
Authored: Tue Jan 31 12:11:01 2017 -0800
Committer: Stephan Ewen 
Committed: Mon Feb 13 20:51:50 2017 +0100

--
 .../main/java/org/apache/flink/yarn/Utils.java  | 223 +-
 .../flink/yarn/YarnApplicationMasterRunner.java | 236 +--
 .../apache/flink/yarn/YarnResourceManager.java  | 211 +
 .../java/org/apache/flink/yarn/UtilsTest.java   | 298 +++
 .../yarn/YarnApplicationMasterRunnerTest.java   |  93 ++
 .../yarn/YarnFlinkResourceManagerTest.java  | 298 ---
 6 files changed, 617 insertions(+), 742 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/186b1230/flink-yarn/src/main/java/org/apache/flink/yarn/Utils.java
--
diff --git a/flink-yarn/src/main/java/org/apache/flink/yarn/Utils.java 
b/flink-yarn/src/main/java/org/apache/flink/yarn/Utils.java
index 94d4582..60f7204 100644
--- a/flink-yarn/src/main/java/org/apache/flink/yarn/Utils.java
+++ b/flink-yarn/src/main/java/org/apache/flink/yarn/Utils.java
@@ -23,11 +23,16 @@ import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.nio.ByteBuffer;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.UUID;
 
 import org.apache.flink.runtime.clusterframework.BootstrapTools;
+import 
org.apache.flink.runtime.clusterframework.ContaineredTaskManagerParameters;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.util.Records;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -53,6 +58,8 @@ import 
org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 
+import static org.apache.flink.yarn.YarnConfigKeys.ENV_FLINK_CLASSPATH;
+
 /**
  * Utility class that provides helper methods to work with Apache Hadoop YARN.
  */
@@ -107,7 +114,7 @@ public final class Utils {
addToEnvironment(
appMasterEnv,
Environment.CLASSPATH.name(),
-   appMasterEnv.get(YarnConfigKeys.ENV_FLINK_CLASSPATH));
+   appMasterEnv.get(ENV_FLINK_CLASSPATH));
String[] applicationClassPathEntries = conf.getStrings(
YarnConfiguration.YARN_APPLICATION_CLASSPATH,
YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
@@ -264,4 +271,218 @@ public final class Utils {
}
return result;
}
+
+   /**
+* Creates the launch context, which describes how to bring up a 
TaskExecutor / TaskManager process in
+* an allocated YARN container.
+*
+* This code is extremely YARN specific and registers all the 
resources that the TaskExecutor
+* needs (such as JAR file, config file, ...) and all environment 
variables in a YARN
+* container launch context. The launch context then ensures that those 
resources will be
+* copied into the containers transient working directory.
+*
+* @param flinkConfig
+*   The Flink configuration object.
+* @param yarnConfig
+*   The YARN configuration object.
+* @param env
+*   The environment variables.
+* @param tmParams
+*   The TaskExecutor container memory parameters.
+* @param taskManagerConfig
+*   The configuration for the TaskExecutors.
+* @param workingDirectory
+*   The current application master container's working 
directory.
+* @param taskManagerMainClass
+*   The class with the main method.
+* @param log
+*   The logger.
+*
+* @return The launch context for the TaskManager processes.
+*
+* @throws Exception Thrown if the launch context could not be created, 
for example if
+* the resources could not be copied.
+*/
+   static ContainerLaunchContext 

[2/2] flink git commit: [FLINK-5729] [examples] Add hostname option to SocketWindowWordCount examples

2017-02-13 Thread sewen
[FLINK-5729] [examples] Add hostname option to SocketWindowWordCount examples

This closes #3283


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/30c5b771
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/30c5b771
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/30c5b771

Branch: refs/heads/master
Commit: 30c5b771a7943e981dd5f67131c932fdb204fbc2
Parents: 6bc6b22
Author: WangTaoTheTonic 
Authored: Tue Feb 7 15:52:26 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Feb 13 20:51:50 2017 +0100

--
 .../examples/socket/SocketWindowWordCount.java  | 17 ---
 .../examples/socket/SocketWindowWordCount.scala | 22 +---
 2 files changed, 24 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/30c5b771/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java
--
diff --git 
a/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java
 
b/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java
index dd2e061..250c5b9 100644
--- 
a/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java
+++ 
b/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/socket/SocketWindowWordCount.java
@@ -35,23 +35,26 @@ import org.apache.flink.util.Collector;
  * 
  * nc -l 12345
  * 
- * and run this example with the port as an argument.
+ * and run this example with the hostname and the port as arguments.
  */
 @SuppressWarnings("serial")
 public class SocketWindowWordCount {
 
public static void main(String[] args) throws Exception {
 
-   // the port to connect to
+   // the host and the port to connect to
+   final String hostname;
final int port;
try {
final ParameterTool params = 
ParameterTool.fromArgs(args);
+   hostname = params.has("hostname") ? 
params.get("hostname") : "localhost";
port = params.getInt("port");
} catch (Exception e) {
-   System.err.println("No port specified. Please run 
'SocketWindowWordCount --port ', " +
-   "where port is the address of the text 
server");
-   System.err.println("To start a simple text server, run 
'netcat -l ' and type the input text " +
-   "into the command line");
+   System.err.println("No port specified. Please run 
'SocketWindowWordCount " +
+   "--hostname  --port ', where 
hostname (localhost by default) " +
+   "and port is the address of the text server");
+   System.err.println("To start a simple text server, run 
'netcat -l ' and " +
+   "type the input text into the command line");
return;
}
 
@@ -59,7 +62,7 @@ public class SocketWindowWordCount {
final StreamExecutionEnvironment env = 
StreamExecutionEnvironment.getExecutionEnvironment();
 
// get input data by connecting to the socket
-   DataStream text = env.socketTextStream("localhost", 
port, "\n");
+   DataStream text = env.socketTextStream(hostname, port, 
"\n");
 
// parse the data, group it, window it, and aggregate the counts
DataStream windowCounts = text

http://git-wip-us.apache.org/repos/asf/flink/blob/30c5b771/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/socket/SocketWindowWordCount.scala
--
diff --git 
a/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/socket/SocketWindowWordCount.scala
 
b/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/socket/SocketWindowWordCount.scala
index 1761b84..d2afa4d 100644
--- 
a/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/socket/SocketWindowWordCount.scala
+++ 
b/flink-examples/flink-examples-streaming/src/main/scala/org/apache/flink/streaming/scala/examples/socket/SocketWindowWordCount.scala
@@ -31,20 +31,26 @@ import org.apache.flink.streaming.api.windowing.time.Time
  * 
  * nc -l 12345
  * 
- * and run this example 
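
The argument handling introduced above, condensed into a standalone sketch (the
class name is made up; the "localhost" fallback matches the diff):

import org.apache.flink.api.java.utils.ParameterTool;

public class ArgsDemo {

    public static void main(String[] args) {
        // e.g. args = {"--hostname", "example.org", "--port", "12345"}
        ParameterTool params = ParameterTool.fromArgs(args);

        // --hostname is optional and defaults to "localhost";
        // --port is still required, and getInt() fails fast without it
        String hostname = params.has("hostname") ? params.get("hostname") : "localhost";
        int port = params.getInt("port");

        System.out.println("connecting to " + hostname + ":" + port);
    }
}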

[2/2] flink git commit: [hotfix] [core] Add tests for Futures applying multiple functions

2017-02-10 Thread sewen
[hotfix] [core] Add tests for Futures applying multiple functions


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/61d7f15d
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/61d7f15d
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/61d7f15d

Branch: refs/heads/master
Commit: 61d7f15dc8500bc5350508fcbe47a0873452857b
Parents: f6709b4
Author: Stephan Ewen 
Authored: Fri Feb 10 13:14:21 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 10 18:50:11 2017 +0100

--
 .../runtime/concurrent/FlinkFutureTest.java | 82 
 1 file changed, 82 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/61d7f15d/flink-runtime/src/test/java/org/apache/flink/runtime/concurrent/FlinkFutureTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/concurrent/FlinkFutureTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/concurrent/FlinkFutureTest.java
index 25d010b..0bdc563 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/concurrent/FlinkFutureTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/concurrent/FlinkFutureTest.java
@@ -21,6 +21,7 @@ package org.apache.flink.runtime.concurrent;
 import org.apache.flink.runtime.concurrent.impl.FlinkCompletableFuture;
 import org.apache.flink.runtime.concurrent.impl.FlinkFuture;
 import org.apache.flink.util.TestLogger;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -35,6 +36,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -454,6 +456,86 @@ public class FlinkFutureTest extends TestLogger {
assertEquals(expectedLeftValue + expectedRightValue, result);
}
 
+   /**
+* Tests that multiple functions can be called on complete futures.
+*/
+   @Test(timeout = 1L)
+   public void testMultipleFunctionsOnCompleteFuture() throws Exception {
+   final FlinkCompletableFuture future = 
FlinkCompletableFuture.completed("test");
+
+   Future result1 = future.handleAsync(new 
BiFunction() {
+
+   @Override
+   public String apply(String s, Throwable throwable) {
+   return s != null ? s : throwable.getMessage();
+   }
+   }, executor);
+
+   Future result2 = future.thenAcceptAsync(new 
AcceptFunction() {
+   @Override
+   public void accept(String value) {}
+   }, executor);
+
+   assertEquals("test", result1.get());
+   assertNull(result2.get());
+   }
+
+   /**
+* Tests that multiple functions can be called on incomplete futures.
+*/
+   @Test(timeout = 1L)
+   public void testMultipleFunctionsOnIncompleteFuture() throws Exception {
+   final FlinkCompletableFuture future = new 
FlinkCompletableFuture<>();
+
+   Future result1 = future.handleAsync(new 
BiFunction() {
+   @Override
+   public String apply(String s, Throwable throwable) {
+   return s != null ? s : throwable.getMessage();
+   }
+   }, executor);
+
+   Future result2 = future.thenAcceptAsync(new 
AcceptFunction() {
+   @Override
+   public void accept(String value) {}
+   }, executor);
+
+   future.complete("value");
+
+   assertEquals("value", result1.get());
+   assertNull(result2.get());
+   }
+
+   /**
+* Tests that multiple functions can be called on exceptionally completed futures.
+*/
+   @Test(timeout = 1)
+   public void testMultipleFunctionsExceptional() throws Exception {
+   final FlinkCompletableFuture future = new 
FlinkCompletableFuture<>();
+
+   Future result1 = future.handleAsync(new 
BiFunction() {
+   @Override
+   public String apply(String s, Throwable throwable) {
+   return s != null ? s : throwable.getMessage();
+   }
+   }, executor);
+
+   Future result2 = future.thenAcceptAsync(new 
AcceptFunction() {
+   @Override
+   

[1/2] flink git commit: [hotfix] [core] Improve FlinkFuture synchronous actions by avoiding creation of ExecutionContext

2017-02-10 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master f6709b4a4 -> ff3786663


[hotfix] [core] Improve FlinkFuture synchronous actions by avoiding creation of 
ExecutionContext


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/ff378666
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/ff378666
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/ff378666

Branch: refs/heads/master
Commit: ff3786663b7f1f8a09b5ad0666f55fb171d7f64c
Parents: 61d7f15
Author: Stephan Ewen 
Authored: Fri Feb 10 13:16:13 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 10 18:50:11 2017 +0100

--
 .../flink/runtime/concurrent/impl/FlinkFuture.java | 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/ff378666/flink-runtime/src/main/java/org/apache/flink/runtime/concurrent/impl/FlinkFuture.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/concurrent/impl/FlinkFuture.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/concurrent/impl/FlinkFuture.java
index dd7e8de..ab23fc5 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/concurrent/impl/FlinkFuture.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/concurrent/impl/FlinkFuture.java
@@ -24,6 +24,7 @@ import akka.dispatch.Mapper;
 import akka.dispatch.OnComplete;
 import akka.dispatch.Recover;
 import akka.japi.Procedure;
+
 import org.apache.flink.runtime.concurrent.AcceptFunction;
 import org.apache.flink.runtime.concurrent.ApplyFunction;
 import org.apache.flink.runtime.concurrent.CompletableFuture;
@@ -31,8 +32,10 @@ import org.apache.flink.runtime.concurrent.Executors;
 import org.apache.flink.runtime.concurrent.Future;
 import org.apache.flink.runtime.concurrent.BiFunction;
 import org.apache.flink.util.Preconditions;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import scala.Option;
 import scala.Tuple2;
 import scala.concurrent.Await;
@@ -59,6 +62,12 @@ public class FlinkFuture implements Future {
 
private static final Logger LOG = 
LoggerFactory.getLogger(FlinkFuture.class);
 
+   private static final Executor DIRECT_EXECUTOR = 
Executors.directExecutor();
+
+   private static final ExecutionContext DIRECT_EXECUTION_CONTEXT = 
executionContextFromExecutor(DIRECT_EXECUTOR);
+
+   // 

+
protected scala.concurrent.Future scalaFuture;
 
FlinkFuture() {
@@ -346,6 +355,14 @@ public class FlinkFuture implements Future {

//---
 
private static ExecutionContext createExecutionContext(final Executor 
executor) {
+   if (executor == DIRECT_EXECUTOR) {
+   return DIRECT_EXECUTION_CONTEXT;
+   } else {
+   return executionContextFromExecutor(executor);
+   }
+   }
+
+   private static ExecutionContext executionContextFromExecutor(final 
Executor executor) {
return ExecutionContexts$.MODULE$.fromExecutor(executor, new 
Procedure() {
@Override
public void apply(Throwable throwable) throws Exception 
{
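
What the optimization affects, as a usage-level sketch (API names as in
flink-runtime's concurrent package): callbacks scheduled with the shared direct
executor now reuse one cached ExecutionContext instead of building a fresh one
per call.

import org.apache.flink.runtime.concurrent.ApplyFunction;
import org.apache.flink.runtime.concurrent.Executors;
import org.apache.flink.runtime.concurrent.Future;
import org.apache.flink.runtime.concurrent.impl.FlinkCompletableFuture;

public class DirectCallback {

    public static void main(String[] args) throws Exception {
        Future<String> future = FlinkCompletableFuture.completed("done");

        // runs synchronously in the calling thread; after this change no
        // fresh ExecutionContext is allocated for the direct executor
        Future<Integer> length = future.thenApplyAsync(new ApplyFunction<String, Integer>() {
            @Override
            public Integer apply(String value) {
                return value.length();
            }
        }, Executors.directExecutor());

        System.out.println(length.get()); // 4
    }
}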



[2/2] flink git commit: [FLINK-5790] [core] Followups and tests for the StateDescriptor changes

2017-02-15 Thread sewen
[FLINK-5790] [core] Followups and tests for the StateDescriptor changes


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2045cc5f
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2045cc5f
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2045cc5f

Branch: refs/heads/master
Commit: 2045cc5f84ab39f18f423154c5620a79ac6d44ba
Parents: d47446c
Author: Stephan Ewen 
Authored: Tue Feb 14 15:32:03 2017 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 15 13:10:30 2017 +0100

--
 .../api/common/state/ListStateDescriptor.java   | 45 +++
 .../common/typeutils/base/ListSerializer.java   | 50 ++--
 .../flink/api/java/typeutils/ListTypeInfo.java  | 45 ---
 .../common/typeutils/SerializerTestBase.java|  7 +-
 .../typeutils/base/ListSerializerTest.java  | 83 
 .../api/common/state/ListStateDescriptor.java   | 10 +--
 .../state/heap/HeapKeyedStateBackend.java   | 40 ++
 .../flink/runtime/state/heap/HeapListState.java | 29 ---
 .../runtime/state/heap/HeapListStateTest.java   |  7 +-
 9 files changed, 237 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/2045cc5f/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
 
b/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
index 2047e24..c03f8cb 100644
--- 
a/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
+++ 
b/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
@@ -27,8 +27,14 @@ import org.apache.flink.api.java.typeutils.ListTypeInfo;
 import java.util.List;
 
 /**
- * A {@link StateDescriptor} for {@link ListState}. This can be used to create 
a partitioned
- * list state using
+ * A {@link StateDescriptor} for {@link ListState}. This can be used to create 
state where the type
+ * is a list that can be appended and iterated over.
+ * 
+ * Using {@code ListState} is typically more efficient than manually 
maintaining a list in a
+ * {@link ValueState}, because the backing implementation can support efficient appends, rather than
+ * replacing the full list on write.
+ * 
+ * To create keyed list state (on a KeyedStream), use 
  * {@link 
org.apache.flink.api.common.functions.RuntimeContext#getListState(ListStateDescriptor)}.
  *
  * @param  The type of the values that can be added to the list state.
@@ -46,7 +52,6 @@ public class ListStateDescriptor extends 
StateDescriptor(elementTypeClass), null);
}
@@ -57,7 +62,6 @@ public class ListStateDescriptor extends 
StateDescriptor(elementTypeInfo), null);
}
@@ -68,26 +72,39 @@ public class ListStateDescriptor extends 
StateDescriptor(typeSerializer), null);
}
 
+   // 

+
+   @Override
+   public ListState bind(StateBackend stateBackend) throws Exception {
+   return stateBackend.createListState(this);
+   }
+
+   /**
+* Gets the serializer for the elements contained in the list.
+* 
+* @return The serializer for the elements in the list.
+*/
public TypeSerializer getElementSerializer() {
-   if (!(serializer instanceof ListSerializer)) {
+   // call getSerializer() here to get the initialization check 
and proper error message
+   final TypeSerializer rawSerializer = getSerializer();
+   if (!(rawSerializer instanceof ListSerializer)) {
throw new IllegalStateException();
}
 
return ((ListSerializer)serializer).getElementSerializer();
}
 
-   // 

-
@Override
-   public ListState bind(StateBackend stateBackend) throws Exception {
-   return stateBackend.createListState(this);
+   public Type getType() {
+   return Type.LIST;
}
 
+   // 

+
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -97,8 +114,7 @@ public class ListStateDescriptor extends 
StateDescriptor
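
A hedged usage sketch matching the javadoc added above; the function, key type,
and state name are made up:

import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class CollectPerKey extends RichFlatMapFunction<Long, String> {

    private transient ListState<Long> seen;

    @Override
    public void open(Configuration parameters) {
        // the element type (Long) determines the ListSerializer used internally
        seen = getRuntimeContext().getListState(
                new ListStateDescriptor<>("seen", Long.class));
    }

    @Override
    public void flatMap(Long value, Collector<String> out) throws Exception {
        seen.add(value); // efficient append, no rewrite of the whole list
        out.collect("seen so far: " + seen.get());
    }
}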

[1/2] flink git commit: [FLINK-5790] [core] Use list types when ListStateDescriptor extends StateDescriptor

2017-02-15 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master ded7faeab -> 2045cc5f8


[FLINK-5790] [core] Use list types when ListStateDescriptor extends 
StateDescriptor

This closes #3305


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d47446ca
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d47446ca
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d47446ca

Branch: refs/heads/master
Commit: d47446cafffe0d34d89488f6eb860aa139ceb3f1
Parents: ded7fae
Author: xiaogang.sxg 
Authored: Tue Feb 14 13:39:30 2017 +0800
Committer: Stephan Ewen 
Committed: Wed Feb 15 12:21:07 2017 +0100

--
 .../streaming/state/RocksDBListState.java   |   4 +-
 .../api/common/state/ListStateDescriptor.java   |  35 +++--
 .../flink/api/common/state/StateDescriptor.java |  18 +--
 .../common/typeutils/base/ListSerializer.java   | 131 +++
 .../flink/api/java/typeutils/ListTypeInfo.java  | 116 
 .../util/MigrationInstantiationUtil.java|   2 +-
 .../common/state/ListStateDescriptorTest.java   |  25 +++-
 .../api/common/state/ListStateDescriptor.java   | 110 
 .../runtime/state/ArrayListSerializer.java  |  10 +-
 .../state/DefaultOperatorStateBackend.java  |   6 +-
 .../state/heap/HeapKeyedStateBackend.java   |  17 +--
 .../flink/runtime/state/heap/HeapListState.java |  25 ++--
 .../runtime/state/StateBackendTestBase.java |   2 +-
 .../runtime/state/heap/HeapListStateTest.java   |   8 +-
 ...ccumulatingProcessingTimeWindowOperator.java |   8 +-
 .../operators/StateDescriptorPassingTest.java   |  28 +++-
 .../operators/StreamingRuntimeContextTest.java  |  10 +-
 17 files changed, 476 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/d47446ca/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
--
diff --git 
a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
 
b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
index e6988f7..a8b20d1 100644
--- 
a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
+++ 
b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
@@ -47,7 +47,7 @@ import java.util.List;
  * @param  The type of the values in the list state.
  */
 public class RocksDBListState
-   extends AbstractRocksDBState
+   extends AbstractRocksDBState
implements InternalListState {
 
/** Serializer for the values */
@@ -72,7 +72,7 @@ public class RocksDBListState
RocksDBKeyedStateBackend backend) {
 
super(columnFamily, namespaceSerializer, stateDesc, backend);
-   this.valueSerializer = stateDesc.getSerializer();
+   this.valueSerializer = stateDesc.getElementSerializer();
 
writeOptions = new WriteOptions();
writeOptions.setDisableWAL(true);

http://git-wip-us.apache.org/repos/asf/flink/blob/d47446ca/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
 
b/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
index 6861a07..2047e24 100644
--- 
a/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
+++ 
b/flink-core/src/main/java/org/apache/flink/api/common/state/ListStateDescriptor.java
@@ -21,6 +21,10 @@ package org.apache.flink.api.common.state;
 import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.common.typeutils.base.ListSerializer;
+import org.apache.flink.api.java.typeutils.ListTypeInfo;
+
+import java.util.List;
 
 /**
  * A {@link StateDescriptor} for {@link ListState}. This can be used to create 
a partitioned
@@ -30,8 +34,8 @@ import org.apache.flink.api.common.typeutils.TypeSerializer;
  * @param  The type of the values that can be added to the list state.
  */
 @PublicEvolving
-public class ListStateDescriptor extends StateDescriptor {
-   private static final long serialVersionUID = 1L;
+public class 

[1/3] flink git commit: [FLINK-5788] [docs] Improve documentation of FileSystem and specify the data persistence contract.

2017-02-14 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 663c1e3f7 -> f7af3b016


[FLINK-5788] [docs] Improve documentation of FileSystem and specify the data 
persistence contract.

This closes #3301


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/f7af3b01
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/f7af3b01
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/f7af3b01

Branch: refs/heads/master
Commit: f7af3b01681592787db16a555b55d6b11d35f869
Parents: af81beb
Author: Stephan Ewen 
Authored: Mon Feb 13 14:29:03 2017 +0100
Committer: Stephan Ewen 
Committed: Tue Feb 14 15:32:43 2017 +0100

--
 docs/internals/filesystems.md   | 138 +++
 .../apache/flink/core/fs/FSDataInputStream.java |  11 +-
 .../flink/core/fs/FSDataOutputStream.java   |  81 ++-
 .../org/apache/flink/core/fs/FileSystem.java|  98 -
 4 files changed, 323 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/f7af3b01/docs/internals/filesystems.md
--
diff --git a/docs/internals/filesystems.md b/docs/internals/filesystems.md
new file mode 100644
index 000..427251a
--- /dev/null
+++ b/docs/internals/filesystems.md
@@ -0,0 +1,138 @@
+---
+title: "File Systems"
+nav-parent_id: internals
+nav-pos: 10
+---
+
+
+* Replaced by the TOC
+{:toc}
+
+Flink has its own file system abstraction via the 
`org.apache.flink.core.fs.FileSystem` class.
+This abstraction provides a common set of operations and minimal guarantees 
across various types
+of file system implementations.
+
+The `FileSystem`'s set of available operations is quite limited, in order to 
support a wide
+range of file systems. For example, appending to or mutating existing files is 
not supported.
+
+File systems are identified by a *file system scheme*, such as `file://`, 
`hdfs://`, etc.
+
+# Implementations
+
+Flink implements the file systems directly, with the following file system 
schemes:
+
+  - `file`, which represents the machine's local file system.
+
+Other file system types are accessed by an implementation that bridges to the 
suite of file systems supported by
+[Apache Hadoop](https://hadoop.apache.org/). The following is an incomplete 
list of examples:
+
+  - `hdfs`: Hadoop Distributed File System
+  - `s3`, `s3n`, and `s3a`: Amazon S3 file system
+  - `gcs`: Google Cloud Storage
+  - `maprfs`: The MapR distributed file system
+  - ...
+
+Flink loads Hadoop's file systems transparently if it finds the Hadoop File 
System classes in the class path and finds a valid
+Hadoop configuration. By default, it looks for the Hadoop configuration in the 
class path. Alternatively, one can specify a
+custom location via the configuration entry `fs.hdfs.hadoopconf`.
+
+
+# Persistence Guarantees
+
+The `FileSystem` and its `FSDataOutputStream` instances are used to 
persistently store data, both for results of applications
+and for fault tolerance and recovery. It is therefore crucial that the 
persistence semantics of these streams are well defined.
+
+## Definition of Persistence Guarantees
+
+Data written to an output stream is considered persistent, if two requirements 
are met:
+
+  1. **Visibility Requirement:** It must be guaranteed that all other 
processes, machines,
+ virtual machines, containers, etc. that are able to access the file see 
the data consistently
+ when given the absolute file path. This requirement is similar to the 
*close-to-open*
+ semantics defined by POSIX, but restricted to the file itself (by its 
absolute path).
+
+  2. **Durability Requirement:** The file system's specific 
durability/persistence requirements
+ must be met. These are specific to the particular file system. For 
example the
+ {@link LocalFileSystem} does not provide any durability guarantees for 
crashes of both
+ hardware and operating system, while replicated distributed file systems 
(like HDFS)
+ guarantee typically durability in the presence of up *n* concurrent node 
failures,
+ where *n* is the replication factor.
+
+Updates to the file's parent directory (such that the file shows up when
+listing the directory contents) are not required to be complete for the data 
in the file stream
+to be considered persistent. This relaxation is important for file systems 
where updates to
+directory contents are only eventually consistent.
+
+The `FSDataOutputStream` has to guarantee data persistence for the written 
bytes once the call to
+`FSDataOutputStream.close()` returns.
+
+## Examples
+ 
+  - For **fault-tolerant distributed file systems**, data is considered 
persistent once 
+it has been received and acknowledged by the file 

[3/3] flink git commit: [FLINK-5762] [runtime] Protect initializeState() and open() by the same lock

2017-02-14 Thread sewen
[FLINK-5762] [runtime] Protect initializeState() and open() by the same lock

This closes #3291


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/a91b6ff0
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/a91b6ff0
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/a91b6ff0

Branch: refs/heads/master
Commit: a91b6ff05d8af870ad076f9bf0fc17886787bc46
Parents: 663c1e3
Author: kl0u 
Authored: Thu Feb 9 16:02:27 2017 +0100
Committer: Stephan Ewen 
Committed: Tue Feb 14 15:32:43 2017 +0100

--
 .../apache/flink/streaming/runtime/tasks/StreamTask.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/a91b6ff0/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
index 2676b64..3781cb6 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
@@ -244,12 +244,15 @@ public abstract class StreamTask
//  Invoke 
LOG.debug("Invoking {}", getName());
 
-   // first order of business is to give operators their 
state
-   initializeState();
-
// we need to make sure that any triggers scheduled in 
open() cannot be
// executed before all operators are opened
synchronized (lock) {
+
+   // both the following operations are protected 
by the lock
+   // so that we avoid race conditions in the case 
that initializeState()
+   // registers a timer, that fires before the 
open() is called.
+
+   initializeState();
openAllOperators();
}
 



[2/3] flink git commit: [FLINK-5553] [network] keep the original throwable in PartitionRequestClientHandler

2017-02-14 Thread sewen
[FLINK-5553] [network] keep the original throwable in 
PartitionRequestClientHandler

This way, when checking for a previous error in any input channel, we can throw
a meaningful exception instead of the unspecific
IllegalStateException("There has been an error in the channel.") used before.

Note that the original throwable (from an existing channel) may or may not(!)
have been printed by the InputGate yet. Any new input channel, however, did not
get the Throwable and must fail through the (now enhanced) fallback mechanism.

This closes #3299


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/af81bebd
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/af81bebd
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/af81bebd

Branch: refs/heads/master
Commit: af81bebd0fabc6390930689df131e72edab6995b
Parents: a91b6ff
Author: Nico Kruber 
Authored: Mon Feb 13 16:30:59 2017 +0100
Committer: Stephan Ewen 
Committed: Tue Feb 14 15:32:43 2017 +0100

--
 .../netty/PartitionRequestClientHandler.java| 27 +++-
 .../netty/ClientTransportErrorHandlingTest.java |  3 ++-
 2 files changed, 22 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/af81bebd/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java
index 52775d4..9f80abc 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java
@@ -42,18 +42,15 @@ import java.util.ArrayDeque;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
-import static org.apache.flink.util.Preconditions.checkState;
-
 class PartitionRequestClientHandler extends ChannelInboundHandlerAdapter {
 
private static final Logger LOG = 
LoggerFactory.getLogger(PartitionRequestClientHandler.class);
 
private final ConcurrentMap 
inputChannels = new ConcurrentHashMap();
 
-   private final AtomicBoolean channelError = new AtomicBoolean(false);
+   private final AtomicReference channelError = new 
AtomicReference();
 
private final BufferListenerTask bufferListener = new 
BufferListenerTask();
 
@@ -73,8 +70,8 @@ class PartitionRequestClientHandler extends 
ChannelInboundHandlerAdapter {
// Input channel/receiver registration
// 

 
-   void addInputChannel(RemoteInputChannel listener) {
-   checkState(!channelError.get(), "There has been an error in the 
channel.");
+   void addInputChannel(RemoteInputChannel listener) throws IOException {
+   checkError();
 
if (!inputChannels.containsKey(listener.getInputChannelId())) {
inputChannels.put(listener.getInputChannelId(), 
listener);
@@ -172,7 +169,7 @@ class PartitionRequestClientHandler extends 
ChannelInboundHandlerAdapter {
}
 
private void notifyAllChannelsOfErrorAndClose(Throwable cause) {
-   if (channelError.compareAndSet(false, true)) {
+   if (channelError.compareAndSet(null, cause)) {
try {
for (RemoteInputChannel inputChannel : 
inputChannels.values()) {
inputChannel.onError(cause);
@@ -195,6 +192,22 @@ class PartitionRequestClientHandler extends 
ChannelInboundHandlerAdapter {
 
// 

 
+   /**
+* Checks for an error and rethrows it if one was reported.
+*/
+   private void checkError() throws IOException {
+   final Throwable t = channelError.get();
+
+   if (t != null) {
+   if (t instanceof IOException) {
+   throw (IOException) t;
+   }
+   else {
+   throw new IOException("There has been an error 
in the channel.", t);
+   }
+   }
+   }
+
@Override
public void 
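
Extracted into a stand-alone sketch (hypothetical class name), the error-propagation pattern introduced here is: record only the first failure in an AtomicReference, and let late-arriving callers rethrow the original cause.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

public class ErrorHolderExample {

    private final AtomicReference<Throwable> channelError = new AtomicReference<Throwable>();

    /** Records only the first error; subsequent errors keep the original cause. */
    void notifyOfError(Throwable cause) {
        channelError.compareAndSet(null, cause);
    }

    /** Rethrows the recorded error, wrapping non-IOExceptions for context. */
    void checkError() throws IOException {
        final Throwable t = channelError.get();
        if (t != null) {
            if (t instanceof IOException) {
                throw (IOException) t;
            } else {
                throw new IOException("There has been an error in the channel.", t);
            }
        }
    }

    public static void main(String[] args) {
        ErrorHolderExample holder = new ErrorHolderExample();
        holder.notifyOfError(new IllegalStateException("root cause"));
        try {
            holder.checkError();
        } catch (IOException e) {
            System.out.println(e.getCause()); // prints the original root cause
        }
    }
}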

[2/2] flink git commit: [FLINK-5132] [core] Introduce the ResourceSpec to define required resource factors in API

2017-02-09 Thread sewen
[FLINK-5132] [core] Introduce the ResourceSpec to define required resource 
factors in API

This closes #3114


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/fd872a14
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/fd872a14
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/fd872a14

Branch: refs/heads/master
Commit: fd872a14974cad14c63478e44edb4e508dfb4069
Parents: cc27f08
Author: 淘江 
Authored: Fri Jan 13 18:40:47 2017 +0800
Committer: Stephan Ewen 
Committed: Thu Feb 9 15:12:00 2017 +0100

--
 .../api/common/operators/ResourceSpec.java  | 198 +++
 1 file changed, 198 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/fd872a14/flink-core/src/main/java/org/apache/flink/api/common/operators/ResourceSpec.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/api/common/operators/ResourceSpec.java
 
b/flink-core/src/main/java/org/apache/flink/api/common/operators/ResourceSpec.java
new file mode 100644
index 000..1387508
--- /dev/null
+++ 
b/flink-core/src/main/java/org/apache/flink/api/common/operators/ResourceSpec.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.api.common.operators;
+
+import org.apache.flink.annotation.Internal;
+
+import javax.annotation.Nonnull;
+import java.io.Serializable;
+
+/**
+ * Describes the different resource factors of an operator with a UDF.
+ *
+ * The state backend provides a method to estimate memory usage based on the state size in the resource.
+ *
+ * The resource provides the {@link #merge(ResourceSpec)} method for chained operators when generating the job graph.
+ *
+ * The resource provides the {@link #lessThanOrEqual(ResourceSpec)} method to compare these fields in sequence:
+ * 
+ * CPU cores
+ * Heap Memory Size
+ * Direct Memory Size
+ * Native Memory Size
+ * State Size
+ * 
+ */
+@Internal
+public class ResourceSpec implements Serializable {
+
+   private static final long serialVersionUID = 1L;
+
+   public static final ResourceSpec UNKNOWN = new ResourceSpec(0, 0, 0, 0, 
0);
+
+   /** How many CPU cores are needed; a double so that fractional CPU, e.g. 0.1, can be specified */
+   private final double cpuCores;
+
+   /** How much Java heap memory is needed, in MB */
+   private final int heapMemoryInMB;
+
+   /** How much NIO direct memory is needed, in MB */
+   private final int directMemoryInMB;
+
+   /** How much native memory is needed, in MB */
+   private final int nativeMemoryInMB;
+
+   /** How much state size is used, in MB */
+   private final int stateSizeInMB;
+
+   /**
+* Creates a new ResourceSpec with basic common resources.
+*
+* @param cpuCores The number of CPU cores (possibly fractional, i.e., 
0.2 cores)
+* @param heapMemoryInMB The size of the java heap memory, in megabytes.
+*/
+   public ResourceSpec(double cpuCores, int heapMemoryInMB) {
+   this.cpuCores = cpuCores;
+   this.heapMemoryInMB = heapMemoryInMB;
+   this.directMemoryInMB = 0;
+   this.nativeMemoryInMB = 0;
+   this.stateSizeInMB = 0;
+   }
+
+   /**
+* Creates a new ResourceSpec with full resources.
+*
+* @param cpuCores The number of CPU cores (possibly fractional, i.e., 
0.2 cores)
+* @param heapMemoryInMB The size of the java heap memory, in megabytes.
+* @param directMemoryInMB The size of the java nio direct memory, in 
megabytes.
+* @param nativeMemoryInMB The size of the native memory, in megabytes.
+* @param stateSizeInMB The state size for storing in checkpoint.
+*/
+   public ResourceSpec(
+   double cpuCores,
+   int heapMemoryInMB,
+   int directMemoryInMB,
+   int 
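
A hypothetical usage sketch of the new class: the constructors are the ones shown in the diff above, while the merge/lessThanOrEqual calls are sketched from the Javadoc and left as comments.

import org.apache.flink.api.common.operators.ResourceSpec;

public class ResourceSpecExample {

    public static void main(String[] args) {
        // basic spec: 0.5 CPU cores and 512 MB of Java heap
        ResourceSpec small = new ResourceSpec(0.5, 512);

        // full spec: cpu cores, heap MB, direct MB, native MB, state size MB
        ResourceSpec large = new ResourceSpec(2.0, 2048, 128, 64, 256);

        // per the Javadoc above, chaining merges specs and scheduling
        // compares them field by field, e.g.:
        //   ResourceSpec chained = small.merge(large);
        //   boolean fits = small.lessThanOrEqual(large);
        System.out.println(small + " / " + large);
    }
}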

[2/2] flink git commit: [FLINK-5748] [jobmanager] Make the 'future executor' a ScheduledExecutorService

2017-02-09 Thread sewen
[FLINK-5748] [jobmanager] Make the 'future executor' a ScheduledExecutorService

This closes #3289


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/665c7e39
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/665c7e39
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/665c7e39

Branch: refs/heads/master
Commit: 665c7e399928188b22a7963cc05654589d47941c
Parents: 95765b6
Author: Stephan Ewen 
Authored: Wed Feb 8 20:51:46 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 9 14:36:35 2017 +0100

--
 .../MesosApplicationMasterRunner.java   |   5 +-
 .../clusterframework/MesosJobManager.scala  |   4 +-
 .../BackPressureStatsTrackerITCase.java |   4 +-
 .../StackTraceSampleCoordinatorITCase.java  |   4 +-
 .../webmonitor/WebRuntimeMonitorITCase.java |   5 +-
 .../runtime/executiongraph/ExecutionGraph.java  |  11 +-
 .../executiongraph/ExecutionGraphBuilder.java   |   6 +-
 .../runtime/jobmaster/JobManagerServices.java   |  12 ++-
 .../flink/runtime/jobmaster/JobMaster.java  |   3 +-
 .../ContaineredJobManager.scala |   4 +-
 .../flink/runtime/jobmanager/JobManager.scala   |  14 +--
 .../runtime/minicluster/FlinkMiniCluster.scala  |   4 +-
 .../minicluster/LocalFlinkMiniCluster.scala |   4 +-
 ...ExecutionGraphCheckpointCoordinatorTest.java |   4 +-
 .../clusterframework/ClusterShutdownITCase.java |  13 +--
 .../clusterframework/ResourceManagerITCase.java |   8 +-
 .../ArchivedExecutionGraphTest.java |   4 +-
 .../ExecutionGraphConstructionTest.java |  32 +++---
 .../ExecutionGraphDeploymentTest.java   |  13 +--
 .../ExecutionGraphMetricsTest.java  |   4 +-
 .../ExecutionGraphRestartTest.java  |  12 +--
 .../ExecutionGraphSignalsTest.java  |   4 +-
 .../executiongraph/ExecutionGraphTestUtils.java |   9 +-
 .../ExecutionStateProgressTest.java |   4 +-
 .../ExecutionVertexCancelTest.java  |  18 ++--
 .../ExecutionVertexDeploymentTest.java  |   5 +-
 .../ExecutionVertexLocalityTest.java|   6 +-
 .../executiongraph/LegacyJobVertexIdTest.java   |   3 +-
 .../executiongraph/PointwisePatternTest.java|  28 ++---
 .../TerminalStateDeadlockTest.java  |   4 +-
 .../executiongraph/VertexSlotSharingTest.java   |   4 +-
 .../jobmanager/JobManagerHARecoveryTest.java|  24 ++---
 .../runtime/jobmanager/JobManagerTest.java  |  20 ++--
 .../flink/runtime/jobmanager/JobSubmitTest.java |   5 +-
 .../JobManagerLeaderElectionTest.java   |  17 ++-
 .../runtime/metrics/TaskManagerMetricsTest.java |   5 +-
 ...askManagerComponentsStartupShutdownTest.java |   6 +-
 .../TaskManagerProcessReapingTestBase.java  |   5 +-
 .../TaskManagerRegistrationTest.java|  12 +--
 .../DirectScheduledExecutorService.java | 107 +++
 .../TaskManagerLossFailsTasksTest.scala |  14 ++-
 .../jobmanager/JobManagerRegistrationTest.scala |   8 +-
 .../runtime/testingUtils/TestingCluster.scala   |   4 +-
 .../testingUtils/TestingJobManager.scala|   4 +-
 .../runtime/testingUtils/TestingUtils.scala |  86 +++
 .../partitioner/RescalePartitionerTest.java |   4 +-
 ...ctTaskManagerProcessFailureRecoveryTest.java |   5 +-
 .../recovery/ProcessFailureCancelingITCase.java |   5 +-
 .../flink/yarn/TestingYarnJobManager.scala  |   4 +-
 .../flink/yarn/YarnApplicationMasterRunner.java |   3 +-
 .../org/apache/flink/yarn/YarnJobManager.scala  |   4 +-
 51 files changed, 385 insertions(+), 212 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/665c7e39/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
--
diff --git 
a/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
 
b/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
index de76d8e..5033692 100644
--- 
a/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
+++ 
b/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
@@ -72,6 +72,7 @@ import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.flink.util.Preconditions.checkState;
@@ -195,7 +196,7 @@ public class MesosApplicationMasterRunner {
ActorSystem actorSystem = null;
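
Why the wider type matters, in a minimal stand-alone example (unrelated to the Flink classes above): a ScheduledExecutorService can also run delayed and periodic work, e.g. future timeouts, which a plain Executor or ExecutorService cannot.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduledExecutorExample {

    public static void main(String[] args) throws Exception {
        ScheduledExecutorService executor = Executors.newScheduledThreadPool(2);

        // a plain Executor can only do this...
        executor.execute(() -> System.out.println("immediate task"));

        // ...while a ScheduledExecutorService can also defer work
        executor.schedule(
                () -> System.out.println("delayed task"), 50, TimeUnit.MILLISECONDS);

        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.SECONDS);
    }
}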

[1/2] flink git commit: [FLINK-5748] [jobmanager] Make the 'future executor' a ScheduledExecutorService

2017-02-09 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 95765b6d8 -> 665c7e399


http://git-wip-us.apache.org/repos/asf/flink/blob/665c7e39/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingUtils.scala
--
diff --git 
a/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingUtils.scala
 
b/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingUtils.scala
index 12dab93..20260c7 100644
--- 
a/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingUtils.scala
+++ 
b/flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingUtils.scala
@@ -18,8 +18,9 @@
 
 package org.apache.flink.runtime.testingUtils
 
-import java.util.UUID
-import java.util.concurrent.Executor
+import java.util
+import java.util.{Collections, UUID}
+import java.util.concurrent._
 
 import akka.actor.{ActorRef, ActorSystem, Kill, Props}
 import akka.pattern.ask
@@ -42,8 +43,9 @@ import 
org.apache.flink.runtime.testutils.TestingResourceManager
 import org.apache.flink.runtime.util.LeaderRetrievalUtils
 import org.apache.flink.runtime.{FlinkActor, LeaderSessionMessageFilter, 
LogMessages}
 
+import scala.concurrent.duration.TimeUnit
 import scala.concurrent.duration._
-import scala.concurrent.{Await, ExecutionContext}
+import scala.concurrent.{ExecutionContextExecutor, Await, ExecutionContext}
 import scala.language.postfixOps
 
 /**
@@ -51,8 +53,10 @@ import scala.language.postfixOps
  */
 object TestingUtils {
 
-  val testConfig = 
ConfigFactory.parseString(getDefaultTestingActorSystemConfigString)
+  private var sharedExecutorInstance: ScheduledExecutorService = _
 
+  val testConfig = 
ConfigFactory.parseString(getDefaultTestingActorSystemConfigString)
+  
   val TESTING_DURATION = 2 minute
 
   val DEFAULT_AKKA_ASK_TIMEOUT = "200 s"
@@ -87,12 +91,25 @@ object TestingUtils {
 cluster
   }
 
-  /** Returns the global [[ExecutionContext]] which is a 
[[scala.concurrent.forkjoin.ForkJoinPool]]
-* with a default parallelism equal to the number of available cores.
-*
-* @return ExecutionContext.global
+  /** 
+* Gets the shared global testing execution context 
 */
-  def defaultExecutionContext = ExecutionContext.global
+  def defaultExecutionContext: ExecutionContextExecutor = {
+ExecutionContext.fromExecutor(defaultExecutor)
+  }
+
+  /**
+   * Gets the shared global testing scheduled executor
+   */
+  def defaultExecutor: ScheduledExecutorService = {
+synchronized {
+  if (sharedExecutorInstance == null || sharedExecutorInstance.isShutdown) 
{
+sharedExecutorInstance = Executors.newSingleThreadScheduledExecutor()
+  }
+
+  sharedExecutorInstance
+}
+  }
 
   /** Returns an [[ExecutionContext]] which uses the current thread to execute 
the runnable.
 *
@@ -108,11 +125,9 @@ object TestingUtils {
   /** [[ExecutionContext]] which queues [[Runnable]] up in an [[ActionQueue]] 
instead of
 * execution them. If the automatic execution mode is activated, then the 
[[Runnable]] are
 * executed.
-*
-* @param actionQueue
 */
   class QueuedActionExecutionContext private[testingUtils] (val actionQueue: 
ActionQueue)
-extends ExecutionContext with Executor {
+extends AbstractExecutorService with ExecutionContext with 
ScheduledExecutorService {
 
 var automaticExecution = false
 
@@ -131,18 +146,53 @@ object TestingUtils {
 override def reportFailure(t: Throwable): Unit = {
   t.printStackTrace()
 }
+
+override def scheduleAtFixedRate(
+command: Runnable,
+initialDelay: Long,
+period: Long,
+unit: TimeUnit): ScheduledFuture[_] = {
+  throw new UnsupportedOperationException()
+}
+
+override def schedule(command: Runnable, delay: Long, unit: TimeUnit): 
ScheduledFuture[_] = {
+  throw new UnsupportedOperationException()
+}
+
+override def schedule[V](callable: Callable[V], delay: Long, unit: 
TimeUnit)
+: ScheduledFuture[V] = {
+  throw new UnsupportedOperationException()
+}
+
+override def scheduleWithFixedDelay(
+command: Runnable,
+initialDelay: Long,
+delay: Long,
+unit: TimeUnit): ScheduledFuture[_] = {
+  throw new UnsupportedOperationException()
+}
+
+override def shutdown(): Unit = ()
+
+override def isTerminated: Boolean = false
+
+override def awaitTermination(timeout: Long, unit: TimeUnit): Boolean = 
false
+
+override def shutdownNow(): util.List[Runnable] = Collections.emptyList()
+
+override def isShutdown: Boolean = false
   }
 
   /** Queue which stores [[Runnable]] */
   class ActionQueue {
 private val runnables = scala.collection.mutable.Queue[Runnable]()
 
-def triggerNextAction {
+def triggerNextAction() {
   val r = runnables.dequeue
   r.run()
 }
 
-def popNextAction: Runnable = {
+def 
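
The lazy, restartable singleton used above for the shared test executor, restated as a minimal Java sketch (hypothetical holder class): under a lock, re-create the instance if it was never created or has been shut down.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public final class SharedExecutorHolder {

    private static ScheduledExecutorService sharedInstance;

    /** Re-creates the executor if it was never created or has been shut down. */
    public static synchronized ScheduledExecutorService get() {
        if (sharedInstance == null || sharedInstance.isShutdown()) {
            sharedInstance = Executors.newSingleThreadScheduledExecutor();
        }
        return sharedInstance;
    }

    private SharedExecutorHolder() {}
}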

[1/2] flink git commit: [FLINK-4912] Introduce RECONCILING state in ExecutionGraph and Execution for JobManager failure recovery

2017-02-09 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 665c7e399 -> fd872a149


[FLINK-4912] Introduce RECONCILING state in ExecutionGraph and Execution for
JobManager failure recovery

This closes #3113


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/cc27f080
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/cc27f080
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/cc27f080

Branch: refs/heads/master
Commit: cc27f0803f4ed1d9799594c75ac00d0e14447479
Parents: 665c7e3
Author: 淘江 
Authored: Fri Jan 13 16:41:37 2017 +0800
Committer: Stephan Ewen 
Committed: Thu Feb 9 15:03:47 2017 +0100

--
 .../flink/runtime/execution/ExecutionState.java | 24 +---
 .../flink/runtime/jobgraph/JobStatus.java   |  5 +++-
 2 files changed, 20 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/cc27f080/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
index e3e3256..d6ff0cd 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/execution/ExecutionState.java
@@ -25,16 +25,23 @@ package org.apache.flink.runtime.execution;
  * {@code
  *
  * CREATED  -> SCHEDULED -> DEPLOYING -> RUNNING -> FINISHED
- *     |            |            |          |
- *     |            |            |   +------+
- *     |            |            V   V
- *     |            |         CANCELLING -----+----> CANCELED
- *     |            |                         |
- *     +------------+-------------------------+
+ *    |    |            |            |          |
+ *    |    |            |            |   +------+
+ *    |    |            |            V   V
+ *    |    |            |         CANCELLING -----+----> CANCELED
+ *    |    |            |                         |
+ *    |    +------------+-------------------------+
+ *    |
+ *    |                                   ... -> FAILED
+ *    V
+ * RECONCILING  -> RUNNING | FINISHED | CANCELED | FAILED
  *
- *                                   ... -> FAILED
  * }
  *
+ * It is possible to enter the {@code RECONCILING} state from the {@code CREATED}
+ * state if the job manager fails over; the {@code RECONCILING} state can then
+ * switch into any existing task state.
+ *
  * It is possible to enter the {@code FAILED} state from any other 
state.
  *
  * The states {@code FINISHED}, {@code CANCELED}, and {@code FAILED} are
@@ -56,8 +63,9 @@ public enum ExecutionState {

CANCELED,

-   FAILED;
+   FAILED,
 
+   RECONCILING;
 
public boolean isTerminal() {
return this == FINISHED || this == CANCELED || this == FAILED;

http://git-wip-us.apache.org/repos/asf/flink/blob/cc27f080/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobStatus.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobStatus.java 
b/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobStatus.java
index 236a217..6a0ac97 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobStatus.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/JobStatus.java
@@ -51,7 +51,10 @@ public enum JobStatus {
 * The job has been suspended which means that it has been stopped but 
not been removed from a
 * potential HA job store.
 */
-   SUSPENDED(TerminalState.LOCALLY);
+   SUSPENDED(TerminalState.LOCALLY),
+
+   /** The job is currently reconciling and waiting for task execution reports to recover its state. */
+   RECONCILING(TerminalState.NON_TERMINAL);

// 
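
A condensed model of the enum change, for illustration only: RECONCILING is non-terminal, so isTerminal() (shown in the diff above) still returns true only for FINISHED, CANCELED and FAILED.

enum ExecutionStateModel {
    CREATED, SCHEDULED, DEPLOYING, RUNNING, RECONCILING,
    FINISHED, CANCELED, FAILED;

    boolean isTerminal() {
        return this == FINISHED || this == CANCELED || this == FAILED;
    }
}

public class StateCheck {
    public static void main(String[] args) {
        System.out.println(ExecutionStateModel.RECONCILING.isTerminal()); // false
        System.out.println(ExecutionStateModel.FAILED.isTerminal());      // true
    }
}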

 



flink git commit: [FLINK-5814] [build] Fix packaging flink-dist in unclean source directory

2017-02-16 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master f250d95b7 -> 2ec2abfae


[FLINK-5814] [build] Fix packaging flink-dist in unclean source directory

If "/build-target" already existed, running 'mvn package' for
flink-dist would create a symbolic link inside "/build-target"
instead of replacing that symlink. This commit fixes this behaviour of 'ln -sf'
by adding the --no-dereference parameter.

This closes #3331


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2ec2abfa
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2ec2abfa
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2ec2abfa

Branch: refs/heads/master
Commit: 2ec2abfae58102af2d29ac65ac907f114ade4839
Parents: f250d95
Author: Nico Kruber 
Authored: Wed Feb 15 15:50:45 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 16 10:52:14 2017 +0100

--
 flink-dist/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/2ec2abfa/flink-dist/pom.xml
--
diff --git a/flink-dist/pom.xml b/flink-dist/pom.xml
index 7627778..0eea0aa 100644
--- a/flink-dist/pom.xml
+++ b/flink-dist/pom.xml
@@ -275,7 +275,7 @@ under the License.


ln


-   
-sf
+   
-sfn

${project.basedir}/target/flink-${project.version}-bin/flink-${project.version}

${project.basedir}/../build-target





flink git commit: [hotfix] [tests] Stabilize FastFailuresITCase

2017-02-16 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 67e3d9d44 -> f250d95b7


[hotfix] [tests] Stabilize FastFailuresITCase

The test triggers 200 immediate failures and recoveries. The restart strategy
allowed 200 restarts.

It may happen that another failure occurs during the execution, in which case
the restart attempts are not sufficient.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/f250d95b
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/f250d95b
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/f250d95b

Branch: refs/heads/master
Commit: f250d95b74fa5713f35a61dd5537aa419d1f12c6
Parents: 67e3d9d
Author: Stephan Ewen 
Authored: Wed Feb 15 18:49:29 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 16 10:51:30 2017 +0100

--
 .../java/org/apache/flink/test/recovery/FastFailuresITCase.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/f250d95b/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
--
diff --git 
a/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
 
b/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
index 8a43ee4..d80c826 100644
--- 
a/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
+++ 
b/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
@@ -58,7 +58,7 @@ public class FastFailuresITCase extends TestLogger {
env.getConfig().disableSysoutLogging();
env.setParallelism(4);
env.enableCheckpointing(1000);
-   
env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(200, 0));
+   
env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(210, 0));

DataStream> input = env.addSource(new 
RichSourceFunction>() {
 



[2/6] flink git commit: [FLINK-5805] [docs] Improvements to docs for ProcessFunction

2017-02-16 Thread sewen
[FLINK-5805] [docs] Improvements to docs for ProcessFunction

This closes #3317


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8fd374c9
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8fd374c9
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8fd374c9

Branch: refs/heads/release-1.2
Commit: 8fd374c9ed5913e42298f71c43922f11e353987e
Parents: e9ada34
Author: David Anderson 
Authored: Wed Feb 15 10:58:55 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 16 14:51:01 2017 +0100

--
 docs/dev/stream/process_function.md | 23 ++-
 1 file changed, 14 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/8fd374c9/docs/dev/stream/process_function.md
--
diff --git a/docs/dev/stream/process_function.md 
b/docs/dev/stream/process_function.md
index a8da4a2..ce3c670 100644
--- a/docs/dev/stream/process_function.md
+++ b/docs/dev/stream/process_function.md
@@ -47,7 +47,7 @@ stream.keyBy("id").process(new MyProcessFunction())
 
 The timers allow applications to react to changes in processing time and in 
[event time](../event_time.html).
Every call to the function `processElement(...)` gets a `Context` object which gives access to the element's
-event time timestamp, and the *TimerService*. The `TimerService` can be used 
to register callbacks for future
+event time timestamp, and to the *TimerService*. The `TimerService` can be 
used to register callbacks for future
 event-/processing- time instants. When a timer's particular time is reached, 
the `onTimer(...)` method is
 called. During that call, all states are again scoped to the key with which 
the timer was created, allowing
 timers to perform keyed state manipulation as well.
@@ -55,30 +55,35 @@ timers to perform keyed state manipulation as well.
 
 ## Low-level Joins
 
-To realize low-level operations on two inputs, applications can use the 
`CoProcessFunction`. It relates to the `ProcessFunction`
-in the same way as a `CoFlatMapFunction` relates to the `FlatMapFunction`: The 
function is typed to two different inputs and
+To realize low-level operations on two inputs, applications can use 
`CoProcessFunction`. It relates to `ProcessFunction`
+in the same way that `CoFlatMapFunction` relates to `FlatMapFunction`: the 
function is bound to two different inputs and
 gets individual calls to `processElement1(...)` and `processElement2(...)` for 
records from the two different inputs.
 
-Implementing a low level join follows typically the pattern:
+Implementing a low level join typically follows this pattern:
 
   - Create a state object for one input (or both)
   - Update the state upon receiving elements from its input
   - Upon receiving elements from the other input, probe the state and produce 
the joined result
 
+For example, you might be joining customer data to financial trades,
+while keeping state for the customer data. If you care about having
+complete and deterministic joins in the face of out-of-order events,
+you can use a timer to evaluate and emit the join for a trade when the
+watermark for the customer data stream has passed the time of that
+trade.
 
 ## Example
 
-The following example maintains counts per key, and emits the key/count pair 
if no update happened to the key for one minute
-(in event time):
+The following example maintains counts per key, and emits a key/count pair 
whenever a minute passes (in event time) without an update for that key:
 
   - The count, key, and last-modification-timestamp are stored in a 
`ValueState`, which is implicitly scoped by key.
   - For each record, the `ProcessFunction` increments the counter and sets the 
last-modification timestamp
   - The function also schedules a callback one minute into the future (in 
event time)
   - Upon each callback, it checks the callback's event time timestamp against 
the last-modification time of the stored count
-and emits the key/count if the match (no further update happened in that 
minute)
+and emits the key/count if they match (i.e., no further update occurred 
during that minute)
 
-*Note:* This simple example could also have been implemented on top of session 
windows, we simple use it to illustrate
-the basic pattern of how to use the `ProcessFunction`.
+*Note:* This simple example could have been implemented with session windows. 
We use `ProcessFunction` here to illustrate
+the basic pattern it provides.
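
For reference, a condensed sketch of the example the page describes, assuming the Flink 1.2 ProcessFunction API (RichProcessFunction, ValueState, event-time timers); the class names are illustrative and details may differ from the published docs code.

import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.RichProcessFunction;
import org.apache.flink.util.Collector;

public class CountWithTimeoutFunction
        extends RichProcessFunction<Tuple2<String, String>, Tuple2<String, Long>> {

    /** The state maintained by this process function, implicitly scoped by key. */
    private ValueState<CountWithTimestamp> state;

    @Override
    public void open(Configuration parameters) throws Exception {
        state = getRuntimeContext().getState(
                new ValueStateDescriptor<>("myState", CountWithTimestamp.class));
    }

    @Override
    public void processElement(
            Tuple2<String, String> value, Context ctx, Collector<Tuple2<String, Long>> out)
            throws Exception {
        CountWithTimestamp current = state.value();
        if (current == null) {
            current = new CountWithTimestamp();
            current.key = value.f0;
        }
        current.count++;                        // increment the counter
        current.lastModified = ctx.timestamp(); // remember the last modification
        state.update(current);
        // schedule the callback one minute (event time) into the future
        ctx.timerService().registerEventTimeTimer(current.lastModified + 60000);
    }

    @Override
    public void onTimer(long timestamp, OnTimerContext ctx, Collector<Tuple2<String, Long>> out)
            throws Exception {
        CountWithTimestamp result = state.value();
        // emit only if no further update occurred during that minute
        if (timestamp == result.lastModified + 60000) {
            out.collect(new Tuple2<>(result.key, result.count));
        }
    }
}

class CountWithTimestamp {
    public String key;
    public long count;
    public long lastModified;
}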
 
 
 



[1/6] flink git commit: [FLINK-5800] [checkpointing] Create CheckpointStreamFactory only once per operator

2017-02-16 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.2 e9ada34f2 -> d3f2fe262


[FLINK-5800] [checkpointing] Create CheckpointStreamFactory only once per
operator

Previously, the factory was created once per checkpoint, and its repeated 
initialization logic
(like ensuring existence of base paths) caused heavy load on some filesystems 
at very large scale.

This closes #3312


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2c93a4c9
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2c93a4c9
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2c93a4c9

Branch: refs/heads/release-1.2
Commit: 2c93a4c93b68d5ea39d5ea71154a4212409da445
Parents: 8837c0c
Author: Stephan Ewen 
Authored: Tue Feb 14 18:35:59 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 16 14:51:01 2017 +0100

--
 .../api/operators/AbstractStreamOperator.java   | 44 ++--
 .../operators/StreamCheckpointedOperator.java   |  4 ++
 .../streaming/api/operators/StreamOperator.java | 26 ---
 .../streaming/runtime/tasks/StreamTask.java | 48 ++---
 .../operators/AbstractStreamOperatorTest.java   | 18 +++--
 .../AbstractUdfStreamOperatorLifecycleTest.java | 15 +++-
 .../streaming/runtime/tasks/StreamTaskTest.java | 73 
 .../util/AbstractStreamOperatorTestHarness.java | 16 ++---
 .../streaming/runtime/StateBackendITCase.java   |  5 +-
 9 files changed, 122 insertions(+), 127 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/2c93a4c9/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
index 9d56626..71d5501 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
@@ -114,6 +114,10 @@ public abstract class AbstractStreamOperator
/** The runtime context for UDFs */
private transient StreamingRuntimeContext runtimeContext;
 
+   // - general state ---
+
+   /** The factory that gives this operator access to checkpoint storage */
+   private transient CheckpointStreamFactory checkpointStreamFactory;
 
//  key/value state --
 
@@ -127,10 +131,11 @@ public abstract class AbstractStreamOperator
/** Keyed state store view on the keyed backend */
private transient DefaultKeyedStateStore keyedStateStore;
 
+   //  operator state --
+
/** Operator state backend / store */
private transient OperatorStateBackend operatorStateBackend;
 
-
// --- Metrics ---
 
/** Metric group for the operator */
@@ -212,6 +217,8 @@ public abstract class AbstractStreamOperator
}
}
 
+   checkpointStreamFactory = 
container.createCheckpointStreamFactory(this);
+
initOperatorState(operatorStateHandlesBackend);
 
StateInitializationContext initializationContext = new 
StateInitializationContextImpl(
@@ -333,8 +340,7 @@ public abstract class AbstractStreamOperator
}
 
@Override
-   public final OperatorSnapshotResult snapshotState(
-   long checkpointId, long timestamp, 
CheckpointStreamFactory streamFactory) throws Exception {
+   public final OperatorSnapshotResult snapshotState(long checkpointId, 
long timestamp) throws Exception {
 
KeyGroupRange keyGroupRange = null != keyedStateBackend ?
keyedStateBackend.getKeyGroupRange() : 
KeyGroupRange.EMPTY_KEY_GROUP_RANGE;
@@ -344,7 +350,7 @@ public abstract class AbstractStreamOperator
try (StateSnapshotContextSynchronousImpl snapshotContext = new 
StateSnapshotContextSynchronousImpl(
checkpointId,
timestamp,
-   streamFactory,
+   checkpointStreamFactory,
keyGroupRange,
getContainingTask().getCancelables())) {
 
@@ -355,14 +361,14 @@ public abstract class AbstractStreamOperator
 
if (null != operatorStateBackend) {

snapshotInProgress.setOperatorStateManagedFuture(
- 
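
The lifecycle change in miniature (hypothetical names, not the Flink classes): the expensive factory setup moves from once-per-checkpoint to once-per-operator, and each snapshot simply reuses the factory.

public class OperatorLifecycleSketch {

    interface StreamFactory {
        void write(long checkpointId);
    }

    private StreamFactory factory; // created once, during operator initialization

    void initialize() {
        // e.g. ensures checkpoint base paths exist; expensive on some filesystems
        factory = checkpointId -> System.out.println("snapshot " + checkpointId);
    }

    void snapshot(long checkpointId) {
        factory.write(checkpointId); // reused; no per-checkpoint setup anymore
    }

    public static void main(String[] args) {
        OperatorLifecycleSketch op = new OperatorLifecycleSketch();
        op.initialize();
        op.snapshot(1);
        op.snapshot(2);
    }
}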

[6/6] flink git commit: [FLINK-5705] [WebMonitor] WebMonitor request/response use UTF-8 explicitly

2017-02-16 Thread sewen
[FLINK-5705] [WebMonitor] WebMonitor request/response use UTF-8 explicitly

This closes #3257


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d3f2fe26
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d3f2fe26
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d3f2fe26

Branch: refs/heads/release-1.2
Commit: d3f2fe2625171f89404e1b90fa8c9493f5403b3a
Parents: 6114c5b
Author: shijinkui 
Authored: Fri Feb 3 17:26:18 2017 +0800
Committer: Stephan Ewen 
Committed: Thu Feb 16 15:09:56 2017 +0100

--
 .../org/apache/flink/runtime/webmonitor/HttpRequestHandler.java | 4 ++--
 .../apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java  | 5 -
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/d3f2fe26/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
--
diff --git 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
index 703b621..585a2f3 100644
--- 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
+++ 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
@@ -107,8 +107,8 @@ public class HttpRequestHandler extends 
SimpleChannelInboundHandler
else if (currentRequest.getMethod() == 
HttpMethod.POST) {
// POST comes in multiple objects. 
First the request, then the contents
// keep the request and path for the 
remaining objects of the POST request
-   currentRequestPath = new 
QueryStringDecoder(currentRequest.getUri()).path();
-   currentDecoder = new 
HttpPostRequestDecoder(DATA_FACTORY, currentRequest);
+   currentRequestPath = new 
QueryStringDecoder(currentRequest.getUri(), ENCODING).path();
+   currentDecoder = new 
HttpPostRequestDecoder(DATA_FACTORY, currentRequest, ENCODING);
}
else {
throw new IOException("Unsupported HTTP 
method: " + currentRequest.getMethod().name());

http://git-wip-us.apache.org/repos/asf/flink/blob/d3f2fe26/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
--
diff --git 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
index 68e1735..8dbd135 100644
--- 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
+++ 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
@@ -30,17 +30,18 @@ import io.netty.handler.codec.http.HttpVersion;
 import io.netty.handler.codec.http.router.KeepAliveWrite;
 import io.netty.handler.codec.http.router.Routed;
 
-import java.net.URLDecoder;
 import org.apache.flink.runtime.instance.ActorGateway;
 import org.apache.flink.runtime.webmonitor.handlers.RequestHandler;
 import org.apache.flink.util.ExceptionUtils;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
 import java.net.InetSocketAddress;
+import java.net.URLDecoder;
 import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
@@ -116,6 +117,8 @@ public class RuntimeMonitorHandler extends 
RuntimeMonitorHandlerBase {
}
 

response.headers().set(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
+   // Content-Encoding:utf-8
+   response.headers().set(HttpHeaders.Names.CONTENT_ENCODING, 
ENCODING.name());
 
KeepAliveWrite.flush(ctx, routed.request(), response);
}
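
Why pinning the charset matters, shown with plain JDK URL decoding (a stand-alone example, not the Netty code above): with UTF-8 given explicitly, non-ASCII parameters decode the same way on every platform.

import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

public class ExplicitCharsetExample {

    public static void main(String[] args) throws Exception {
        // %E6%B7%98%E6%B1%9F is the UTF-8 encoding of two CJK characters
        String encoded = "name=%E6%B7%98%E6%B1%9F";
        String decoded = URLDecoder.decode(encoded, StandardCharsets.UTF_8.name());
        System.out.println(decoded); // decodes correctly regardless of platform default
    }
}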



[3/6] flink git commit: [hotfix] [streaming] Properly mark non-transported fields as transient in AbstractStreamOperator

2017-02-16 Thread sewen
[hotfix] [streaming] Properly mark non-transported fields as transient in 
AbstractStreamOperator


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8837c0c5
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8837c0c5
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8837c0c5

Branch: refs/heads/release-1.2
Commit: 8837c0c5f357c52c15e139ed0697e3710c610db9
Parents: 8fd374c
Author: Stephan Ewen 
Authored: Tue Feb 14 18:16:36 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 16 14:51:01 2017 +0100

--
 .../flink/streaming/api/operators/AbstractStreamOperator.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/8837c0c5/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
index 16fe2c1..9d56626 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
@@ -134,9 +134,9 @@ public abstract class AbstractStreamOperator
// --- Metrics ---
 
/** Metric group for the operator */
-   protected MetricGroup metrics;
+   protected transient MetricGroup metrics;
 
-   protected LatencyGauge latencyGauge;
+   protected transient LatencyGauge latencyGauge;
 
//  timers --
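
Why the transient keyword matters here, in a stand-alone example: operator instances are serialized when a job is shipped, and transient fields are skipped during serialization and left to be re-created at runtime.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

public class TransientExample implements Serializable {

    private transient Object metrics = new Object(); // runtime-only, not shipped

    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream out = new ObjectOutputStream(bos);
        out.writeObject(new TransientExample());
        out.flush();

        ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(bos.toByteArray()));
        TransientExample copy = (TransientExample) in.readObject();
        System.out.println(copy.metrics); // null: skipped by serialization
    }
}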
 



[4/6] flink git commit: [hotfix] [tests] Stabilize FastFailuresITCase

2017-02-16 Thread sewen
[hotfix] [tests] Stabilize FastFailuresITCase

The test triggers 200 immediate failures and recoveries. The restart strategy
allowed 200 restarts.

It may happen that another failure occurs during the execution, in which case
the restart attempts are not sufficient.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/3429ea0a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/3429ea0a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/3429ea0a

Branch: refs/heads/release-1.2
Commit: 3429ea0aa7478fa3161742592a26c87660ff4617
Parents: 2c93a4c
Author: Stephan Ewen 
Authored: Wed Feb 15 18:49:29 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 16 14:51:20 2017 +0100

--
 .../java/org/apache/flink/test/recovery/FastFailuresITCase.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/3429ea0a/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
--
diff --git 
a/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
 
b/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
index 8a43ee4..d80c826 100644
--- 
a/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
+++ 
b/flink-tests/src/test/java/org/apache/flink/test/recovery/FastFailuresITCase.java
@@ -58,7 +58,7 @@ public class FastFailuresITCase extends TestLogger {
env.getConfig().disableSysoutLogging();
env.setParallelism(4);
env.enableCheckpointing(1000);
-   
env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(200, 0));
+   
env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(210, 0));

DataStream> input = env.addSource(new 
RichSourceFunction>() {
 



[5/6] flink git commit: [FLINK-5814] [build] Fix packaging flink-dist in unclean source directory

2017-02-16 Thread sewen
[FLINK-5814] [build] Fix packaging flink-dist in unclean source directory

If "/build-target" already existed, running 'mvn package' for
flink-dist would create a symbolic link inside "/build-target"
instead of replacing that symlink. This commit fixes this behaviour of 'ln -sf'
by adding the --no-dereference parameter.

This closes #3331


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6114c5b0
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6114c5b0
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6114c5b0

Branch: refs/heads/release-1.2
Commit: 6114c5b01d60d37efdd7db47bf9378f8dea4385c
Parents: 3429ea0
Author: Nico Kruber 
Authored: Wed Feb 15 15:50:45 2017 +0100
Committer: Stephan Ewen 
Committed: Thu Feb 16 14:51:27 2017 +0100

--
 flink-dist/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/6114c5b0/flink-dist/pom.xml
--
diff --git a/flink-dist/pom.xml b/flink-dist/pom.xml
index 1af0775..b00b7eb 100644
--- a/flink-dist/pom.xml
+++ b/flink-dist/pom.xml
@@ -275,7 +275,7 @@ under the License.


ln


-   
-sf
+   
-sfn

${project.basedir}/target/flink-${project.version}-bin/flink-${project.version}

${project.basedir}/../build-target





flink git commit: [FLINK-5705] [WebMonitor] WebMonitor request/response use UTF-8 explicitly

2017-02-16 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 494edb041 -> f24514339


[FLINK-5705] [WebMonitor] WebMonitor request/response use UTF-8 explicitly

This closes #3257


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/f2451433
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/f2451433
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/f2451433

Branch: refs/heads/master
Commit: f24514339c78d809a28731fa18e8df638b382e3b
Parents: 494edb0
Author: shijinkui 
Authored: Fri Feb 3 17:26:18 2017 +0800
Committer: Stephan Ewen 
Committed: Thu Feb 16 17:15:17 2017 +0100

--
 .../org/apache/flink/runtime/webmonitor/HttpRequestHandler.java | 4 ++--
 .../apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java  | 5 -
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/f2451433/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
--
diff --git 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
index 703b621..585a2f3 100644
--- 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
+++ 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/HttpRequestHandler.java
@@ -107,8 +107,8 @@ public class HttpRequestHandler extends 
SimpleChannelInboundHandler
else if (currentRequest.getMethod() == 
HttpMethod.POST) {
// POST comes in multiple objects. 
First the request, then the contents
// keep the request and path for the 
remaining objects of the POST request
-   currentRequestPath = new 
QueryStringDecoder(currentRequest.getUri()).path();
-   currentDecoder = new 
HttpPostRequestDecoder(DATA_FACTORY, currentRequest);
+   currentRequestPath = new 
QueryStringDecoder(currentRequest.getUri(), ENCODING).path();
+   currentDecoder = new 
HttpPostRequestDecoder(DATA_FACTORY, currentRequest, ENCODING);
}
else {
throw new IOException("Unsupported HTTP 
method: " + currentRequest.getMethod().name());

http://git-wip-us.apache.org/repos/asf/flink/blob/f2451433/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
--
diff --git 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
index 68e1735..8dbd135 100644
--- 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
+++ 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/RuntimeMonitorHandler.java
@@ -30,17 +30,18 @@ import io.netty.handler.codec.http.HttpVersion;
 import io.netty.handler.codec.http.router.KeepAliveWrite;
 import io.netty.handler.codec.http.router.Routed;
 
-import java.net.URLDecoder;
 import org.apache.flink.runtime.instance.ActorGateway;
 import org.apache.flink.runtime.webmonitor.handlers.RequestHandler;
 import org.apache.flink.util.ExceptionUtils;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
 import java.net.InetSocketAddress;
+import java.net.URLDecoder;
 import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
@@ -116,6 +117,8 @@ public class RuntimeMonitorHandler extends 
RuntimeMonitorHandlerBase {
}
 

response.headers().set(HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
+   // Content-Encoding:utf-8
+   response.headers().set(HttpHeaders.Names.CONTENT_ENCODING, 
ENCODING.name());
 
KeepAliveWrite.flush(ctx, routed.request(), response);
}



[1/2] flink git commit: [FLINK-5805] [docs] Improvements to docs for ProcessFunction

2017-02-15 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 7477c5b57 -> 5fb267de6


[FLINK-5805] [docs] Improvements to docs for ProcessFunction

This closes #3317


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/5fb267de
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/5fb267de
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/5fb267de

Branch: refs/heads/master
Commit: 5fb267de68b68bc47c469f95b3bde8eebcd42007
Parents: 33ea78e
Author: David Anderson 
Authored: Wed Feb 15 10:58:55 2017 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 15 18:45:46 2017 +0100

--
 docs/dev/stream/process_function.md | 23 ++-
 1 file changed, 14 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/5fb267de/docs/dev/stream/process_function.md
--
diff --git a/docs/dev/stream/process_function.md 
b/docs/dev/stream/process_function.md
index 99a3bf6..22295be 100644
--- a/docs/dev/stream/process_function.md
+++ b/docs/dev/stream/process_function.md
@@ -47,7 +47,7 @@ stream.keyBy("id").process(new MyProcessFunction())
 
 The timers allow applications to react to changes in processing time and in 
[event time](../event_time.html).
Every call to the function `processElement(...)` gets a `Context` object which gives access to the element's
-event time timestamp, and the *TimerService*. The `TimerService` can be used 
to register callbacks for future
+event time timestamp, and to the *TimerService*. The `TimerService` can be 
used to register callbacks for future
 event-/processing- time instants. When a timer's particular time is reached, 
the `onTimer(...)` method is
 called. During that call, all states are again scoped to the key with which 
the timer was created, allowing
 timers to perform keyed state manipulation as well.
@@ -55,30 +55,35 @@ timers to perform keyed state manipulation as well.
 
 ## Low-level Joins
 
-To realize low-level operations on two inputs, applications can use the 
`CoProcessFunction`. It relates to the `ProcessFunction`
-in the same way as a `CoFlatMapFunction` relates to the `FlatMapFunction`: The 
function is typed to two different inputs and
+To realize low-level operations on two inputs, applications can use 
`CoProcessFunction`. It relates to `ProcessFunction`
+in the same way that `CoFlatMapFunction` relates to `FlatMapFunction`: the 
function is bound to two different inputs and
 gets individual calls to `processElement1(...)` and `processElement2(...)` for 
records from the two different inputs.
 
-Implementing a low level join follows typically the pattern:
+Implementing a low level join typically follows this pattern:
 
   - Create a state object for one input (or both)
   - Update the state upon receiving elements from its input
   - Upon receiving elements from the other input, probe the state and produce 
the joined result
 
+For example, you might be joining customer data to financial trades,
+while keeping state for the customer data. If you care about having
+complete and deterministic joins in the face of out-of-order events,
+you can use a timer to evaluate and emit the join for a trade when the
+watermark for the customer data stream has passed the time of that
+trade.
 
 ## Example
 
-The following example maintains counts per key, and emits the key/count pair 
if no update happened to the key for one minute
-(in event time):
+The following example maintains counts per key, and emits a key/count pair 
whenever a minute passes (in event time) without an update for that key:
 
   - The count, key, and last-modification-timestamp are stored in a 
`ValueState`, which is implicitly scoped by key.
   - For each record, the `ProcessFunction` increments the counter and sets the 
last-modification timestamp
   - The function also schedules a callback one minute into the future (in 
event time)
   - Upon each callback, it checks the callback's event time timestamp against 
the last-modification time of the stored count
-and emits the key/count if the match (no further update happened in that 
minute)
+and emits the key/count if they match (i.e., no further update occurred 
during that minute)
 
-*Note:* This simple example could also have been implemented on top of session 
windows, we simple use it to illustrate
-the basic pattern of how to use the `ProcessFunction`.
+*Note:* This simple example could have been implemented with session windows. 
We use `ProcessFunction` here to illustrate
+the basic pattern it provides.
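
The low-level join pattern described earlier in this page, reduced to a framework-free sketch (hypothetical names; a real implementation would keep the map in Flink keyed state rather than a HashMap).

import java.util.HashMap;
import java.util.Map;

public class LowLevelJoinSketch {

    // "state object for one input": customer id -> customer name
    private final Map<Long, String> customerState = new HashMap<>();

    // analogous to processElement1: update the state from the customer stream
    void onCustomer(long customerId, String name) {
        customerState.put(customerId, name);
    }

    // analogous to processElement2: probe the state and produce the joined result
    String onTrade(long customerId, double amount) {
        String name = customerState.get(customerId);
        return name == null ? null : name + " traded " + amount;
    }

    public static void main(String[] args) {
        LowLevelJoinSketch join = new LowLevelJoinSketch();
        join.onCustomer(42L, "Alice");
        System.out.println(join.onTrade(42L, 99.5)); // Alice traded 99.5
    }
}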
 
 
 



[2/2] flink git commit: [FLINK-5793] [runtime] Fix bug where a running slot may not be added to the AllocatedMap in SlotPool

2017-02-15 Thread sewen
[FLINK-5793] [runtime] Fix bug where a running slot may not be added to the
AllocatedMap in SlotPool

This closes #3306


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/33ea78ea
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/33ea78ea
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/33ea78ea

Branch: refs/heads/master
Commit: 33ea78ea37fab819a329b09aa213e61c16252067
Parents: 7477c5b
Author: shuai.xus 
Authored: Tue Feb 14 14:56:41 2017 +0800
Committer: Stephan Ewen 
Committed: Wed Feb 15 18:45:46 2017 +0100

--
 .../org/apache/flink/runtime/instance/SlotPool.java| 13 -
 .../apache/flink/runtime/instance/SlotPoolTest.java|  2 ++
 2 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/33ea78ea/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotPool.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotPool.java 
b/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotPool.java
index 672431e..4da6c7b 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotPool.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/instance/SlotPool.java
@@ -436,7 +436,9 @@ public class SlotPool extends RpcEndpoint {
LOG.debug("Fulfilling pending request 
[{}] early with returned slot [{}]",

pendingRequest.allocationID(), taskManagerSlot.getSlotAllocationId());
 
-   
pendingRequest.future().complete(createSimpleSlot(taskManagerSlot, 
Locality.UNKNOWN));
+   SimpleSlot newSlot = 
createSimpleSlot(taskManagerSlot, Locality.UNKNOWN);
+   allocatedSlots.add(newSlot);
+   
pendingRequest.future().complete(newSlot);
}
else {
LOG.debug("Adding returned slot [{}] to 
available slots", taskManagerSlot.getSlotAllocationId());
@@ -627,6 +629,15 @@ public class SlotPool extends RpcEndpoint 
{
}
 
// 

+   //  Methods for tests
+   // 

+
+   @VisibleForTesting
+   AllocatedSlots getAllocatedSlots() {
+   return allocatedSlots;
+   }
+
+   // 

//  Helper classes
// 

 

http://git-wip-us.apache.org/repos/asf/flink/blob/33ea78ea/flink-runtime/src/test/java/org/apache/flink/runtime/instance/SlotPoolTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/instance/SlotPoolTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/instance/SlotPoolTest.java
index 97457e1..538e286 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/instance/SlotPoolTest.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/instance/SlotPoolTest.java
@@ -115,6 +115,7 @@ public class SlotPoolTest extends TestLogger {
assertEquals(resourceID, slot.getTaskManagerID());
assertEquals(jobId, slot.getJobID());
assertEquals(slotPool.getSlotOwner(), slot.getOwner());
+   
assertEquals(slotPool.getAllocatedSlots().get(slot.getAllocatedSlot().getSlotAllocationId()),
 slot);
}
 
@Test
@@ -153,6 +154,7 @@ public class SlotPoolTest extends TestLogger {
assertTrue(slot2.isAlive());
assertEquals(slot1.getTaskManagerID(), 
slot2.getTaskManagerID());
assertEquals(slot1.getSlotNumber(), slot2.getSlotNumber());
+   
assertEquals(slotPool.getAllocatedSlots().get(slot1.getAllocatedSlot().getSlotAllocationId()),
 slot2);
}
 
@Test
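
The essence of the fix as a stand-alone sketch (hypothetical names): record the slot in the bookkeeping map before completing the future, so that callbacks fired by complete() already observe consistent state.

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class BookkeepingOrderExample {

    private final Map<Integer, String> allocatedSlots = new ConcurrentHashMap<>();

    void fulfill(int slotId, CompletableFuture<String> pendingRequest) {
        String slot = "slot-" + slotId;
        allocatedSlots.put(slotId, slot); // bookkeeping first...
        pendingRequest.complete(slot);    // ...then hand the slot to the requester
    }

    public static void main(String[] args) {
        BookkeepingOrderExample pool = new BookkeepingOrderExample();
        CompletableFuture<String> request = new CompletableFuture<>();
        request.thenAccept(s -> System.out.println("got " + s));
        pool.fulfill(7, request);
    }
}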



[1/2] flink git commit: [hotfix] [streaming] Properly mark non-transported fields as transient in AbstractStreamOperator

2017-02-15 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master b06522681 -> 04e6758ab


[hotfix] [streaming] Properly mark non-transported fields as transient in 
AbstractStreamOperator


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d062448c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d062448c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d062448c

Branch: refs/heads/master
Commit: d062448c294d88e74fb5f8f2acd882850cad89bc
Parents: b065226
Author: Stephan Ewen 
Authored: Tue Feb 14 18:16:36 2017 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 15 15:01:36 2017 +0100

--
 .../flink/streaming/api/operators/AbstractStreamOperator.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/d062448c/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
index 05f2ed5..7a3e2ce 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
@@ -134,9 +134,9 @@ public abstract class AbstractStreamOperator
// --- Metrics ---
 
/** Metric group for the operator */
-   protected MetricGroup metrics;
+   protected transient MetricGroup metrics;
 
-   protected LatencyGauge latencyGauge;
+   protected transient LatencyGauge latencyGauge;
 
//  timers --
 



[2/2] flink git commit: [FLINK-5800] [checkpointing] Create CheckpointStreamFactory only once per operator

2017-02-15 Thread sewen
[FLINK-5800] [checkpointing] Create CheckpointStreamFactory only once per
operator

Previously, the factory was created once per checkpoint, and its repeated 
initialization logic
(like ensuring existence of base paths) caused heavy load on some filesystems 
at very large scale.

This closes #3312


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/04e6758a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/04e6758a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/04e6758a

Branch: refs/heads/master
Commit: 04e6758abbadf39a12848a925e6e087e060bbe3a
Parents: d062448
Author: Stephan Ewen 
Authored: Tue Feb 14 18:35:59 2017 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 15 15:08:21 2017 +0100

--
 .../api/operators/AbstractStreamOperator.java   | 44 ++--
 .../operators/StreamCheckpointedOperator.java   |  4 ++
 .../streaming/api/operators/StreamOperator.java | 26 ---
 .../streaming/runtime/tasks/StreamTask.java | 48 ++---
 .../operators/AbstractStreamOperatorTest.java   | 18 +++--
 .../AbstractUdfStreamOperatorLifecycleTest.java | 15 +++-
 .../streaming/runtime/tasks/StreamTaskTest.java | 73 
 .../util/AbstractStreamOperatorTestHarness.java | 16 ++---
 .../streaming/runtime/StateBackendITCase.java   |  5 +-
 9 files changed, 122 insertions(+), 127 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/04e6758a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
index 7a3e2ce..144247f 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractStreamOperator.java
@@ -114,6 +114,10 @@ public abstract class AbstractStreamOperator
/** The runtime context for UDFs */
private transient StreamingRuntimeContext runtimeContext;
 
+   // - general state ---
+
+   /** The factory that gives this operator access to checkpoint storage */
+   private transient CheckpointStreamFactory checkpointStreamFactory;
 
//  key/value state --
 
@@ -127,10 +131,11 @@ public abstract class AbstractStreamOperator
/** Keyed state store view on the keyed backend */
private transient DefaultKeyedStateStore keyedStateStore;
 
+   //  operator state --
+
/** Operator state backend / store */
private transient OperatorStateBackend operatorStateBackend;
 
-
// --- Metrics ---
 
/** Metric group for the operator */
@@ -212,6 +217,8 @@ public abstract class AbstractStreamOperator
}
}
 
+   checkpointStreamFactory = 
container.createCheckpointStreamFactory(this);
+
initOperatorState(operatorStateHandlesBackend);
 
StateInitializationContext initializationContext = new 
StateInitializationContextImpl(
@@ -333,8 +340,7 @@ public abstract class AbstractStreamOperator
}
 
@Override
-   public final OperatorSnapshotResult snapshotState(
-   long checkpointId, long timestamp, 
CheckpointStreamFactory streamFactory) throws Exception {
+   public final OperatorSnapshotResult snapshotState(long checkpointId, 
long timestamp) throws Exception {
 
KeyGroupRange keyGroupRange = null != keyedStateBackend ?
keyedStateBackend.getKeyGroupRange() : 
KeyGroupRange.EMPTY_KEY_GROUP_RANGE;
@@ -344,7 +350,7 @@ public abstract class AbstractStreamOperator
try (StateSnapshotContextSynchronousImpl snapshotContext = new 
StateSnapshotContextSynchronousImpl(
checkpointId,
timestamp,
-   streamFactory,
+   checkpointStreamFactory,
keyGroupRange,
getContainingTask().getCancelables())) {
 
@@ -355,14 +361,14 @@ public abstract class AbstractStreamOperator
 
if (null != operatorStateBackend) {

snapshotInProgress.setOperatorStateManagedFuture(
-   
operatorStateBackend.snapshot(checkpointId, timestamp, 

[1/4] flink git commit: [FLINK-5247] [streaming api] Fix checks for allowed lateness in windowed streams

2017-01-23 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.2 28dd4a57a -> 1dfb3e0e2


[FLINK-5247] [streaming api] Fix checks for allowed lateness in windowed streams

Also, fix outdated documentation.
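
The essence of the bug, visible in the diff below: the old checks tested the
existing field allowedLateness instead of the freshly computed millis, so the
validation ran against the previous value rather than the new one. A minimal
standalone sketch of the corrected logic (simplified, not the actual stream
classes):

    // validates and normalizes an allowed-lateness value, mirroring the fix
    static long checkAllowedLateness(long millis, boolean isEventTimeWindow) {
        if (millis < 0) {
            throw new IllegalArgumentException("The allowed lateness cannot be negative.");
        } else if (isEventTimeWindow) {
            return millis;   // only event-time windows honor allowed lateness
        } else {
            return 0;        // processing-time windows ignore the setting
        }
    }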


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4697b97a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4697b97a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4697b97a

Branch: refs/heads/release-1.2
Commit: 4697b97a0101cf04b43c4a6e4887adba10b4a69a
Parents: 28dd4a5
Author: Rohit Agarwal 
Authored: Sat Dec 3 12:15:45 2016 -0800
Committer: Stephan Ewen 
Committed: Mon Jan 23 19:09:14 2017 +0100

--
 .../flink/streaming/api/datastream/AllWindowedStream.java  | 6 ++
 .../apache/flink/streaming/api/datastream/WindowedStream.java  | 6 ++
 .../apache/flink/streaming/api/scala/AllWindowedStream.scala   | 2 +-
 .../org/apache/flink/streaming/api/scala/WindowedStream.scala  | 2 +-
 4 files changed, 6 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/4697b97a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
index ae71ce5..0f0e947 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
@@ -123,11 +123,9 @@ public class AllWindowedStream {
@PublicEvolving
public AllWindowedStream allowedLateness(Time lateness) {
long millis = lateness.toMilliseconds();
-   if (allowedLateness < 0) {
+   if (millis < 0) {
throw new IllegalArgumentException("The allowed 
lateness cannot be negative.");
-   } else if (allowedLateness != 0 && 
!windowAssigner.isEventTime()) {
-   throw new IllegalArgumentException("Setting the allowed 
lateness is only valid for event-time windows.");
-   } else {
+   } else if (windowAssigner.isEventTime()) {
this.allowedLateness = millis;
}
return this;

http://git-wip-us.apache.org/repos/asf/flink/blob/4697b97a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
index 51712e1..2df3621 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
@@ -141,11 +141,9 @@ public class WindowedStream {
@PublicEvolving
public WindowedStream allowedLateness(Time lateness) {
long millis = lateness.toMilliseconds();
-   if (allowedLateness < 0) {
+   if (millis < 0) {
throw new IllegalArgumentException("The allowed 
lateness cannot be negative.");
-   } else if (allowedLateness != 0 && 
!windowAssigner.isEventTime()) {
-   throw new IllegalArgumentException("Setting the allowed 
lateness is only valid for event-time windows.");
-   } else {
+   } else if (windowAssigner.isEventTime()) {
this.allowedLateness = millis;
}
return this;

http://git-wip-us.apache.org/repos/asf/flink/blob/4697b97a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/AllWindowedStream.scala
--
diff --git 
a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/AllWindowedStream.scala
 
b/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/AllWindowedStream.scala
index 83104e8..324689a 100644
--- 
a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/AllWindowedStream.scala
+++ 
b/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/AllWindowedStream.scala
@@ -58,7 +58,7 @@ class AllWindowedStream[T, W <: Window](javaStream: 
JavaAllWStream[T, W]) {
 
   /**

[4/4] flink git commit: [hotfix] [streaming api] Improve JavaDocs of the user-facing checkpointing and state interfaces

2017-01-23 Thread sewen
[hotfix] [streaming api] Improve JavaDocs of the user-facing checkpointing and 
state interfaces


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/1dfb3e0e
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/1dfb3e0e
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/1dfb3e0e

Branch: refs/heads/release-1.2
Commit: 1dfb3e0e2a3b933c1a2378ccd4e462502b0da276
Parents: 083152c
Author: Stephan Ewen 
Authored: Mon Jan 23 19:07:54 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 23 19:44:41 2017 +0100

--
 .../runtime/state/ManagedSnapshotContext.java   |  11 +-
 .../streaming/api/checkpoint/Checkpointed.java  |  35 -
 .../checkpoint/CheckpointedAsynchronously.java  |  48 ---
 .../api/checkpoint/CheckpointedFunction.java| 133 +--
 .../api/checkpoint/ListCheckpointed.java| 125 +++--
 5 files changed, 297 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/1dfb3e0e/flink-runtime/src/main/java/org/apache/flink/runtime/state/ManagedSnapshotContext.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/state/ManagedSnapshotContext.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/state/ManagedSnapshotContext.java
index 14156a6..de65c5d 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/state/ManagedSnapshotContext.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/state/ManagedSnapshotContext.java
@@ -29,13 +29,18 @@ import org.apache.flink.annotation.PublicEvolving;
 public interface ManagedSnapshotContext {
 
/**
-* Returns the Id of the checkpoint for which the snapshot is taken.
+* Returns the ID of the checkpoint for which the snapshot is taken.
+* 
+* The checkpoint ID is guaranteed to be strictly monotonically 
increasing across checkpoints.
+* For two completed checkpoints A and B, {@code ID_B > 
ID_A} means that checkpoint
+* B subsumes checkpoint A, i.e., checkpoint B 
contains a later state than checkpoint A.
 */
long getCheckpointId();
 
/**
-* Returns the timestamp of the checkpoint for which the snapshot is 
taken.
+* Returns the timestamp (wall clock time) when the master node triggered 
the checkpoint for which
+* the state snapshot is taken.
 */
long getCheckpointTimestamp();
-
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/1dfb3e0e/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
index fb67ea7..dd93462 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/Checkpointed.java
@@ -27,13 +27,38 @@ import java.io.Serializable;
  * checkpointed. The functions get a call whenever a checkpoint should take 
place
  * and return a snapshot of their state, which will be checkpointed.
  * 
- * This interface marks a function as synchronously checkpointed. 
While the
- * state is written, the function is not called, so the function needs not 
return a
- * copy of its state, but may return a reference to its state. Functions that 
can
- * continue to work and mutate the state, even while the state snapshot is 
being accessed,
- * can implement the {@link CheckpointedAsynchronously} interface.
+ * Deprecation and Replacement
+ *
+ * The shortcut replacement for this interface is via {@link 
ListCheckpointed} and works
+ * as shown in the example below. The {@code ListCheckpointed} interface 
returns a list of
+ * state elements, which can be redistributed across parallel instances when 
rescaling.
+ *
+ * {@code
+ * public class ExampleFunction<T> implements MapFunction<T, T>, 
ListCheckpointed<Integer> {
+ *
+ * private int count;
+ *
+ * public List<Integer> snapshotState(long checkpointId, long timestamp) 
throws Exception {
+ * return Collections.singletonList(this.count);
+ * }
+ *
+ * public void restoreState(List<Integer> state) throws Exception {
+ * this.count = state.isEmpty() ? 0 : state.get(0);
+ * }
+ *
+ * public T map(T value) {
+ * count++;
+ * return value;
+ * }
+ * }
+ * }
  * 
 * @param <T> The type of the operator state.
+ * 
+ * @deprecated Please use {@link ListCheckpointed} as illustrated above, or
+ * {@link CheckpointedFunction} for more control over the checkpointing process.

[3/4] flink git commit: [FLINK-4917] [streaming api] Deprecate "CheckpointedAsynchronously" interface

2017-01-23 Thread sewen
[FLINK-4917] [streaming api] Deprecate "CheckpointedAsynchronously" interface

This closes #3087


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/083152c6
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/083152c6
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/083152c6

Branch: refs/heads/release-1.2
Commit: 083152c63086e465b71c702815a2d22af686f122
Parents: 159f51b
Author: mtunique 
Authored: Tue Jan 10 23:05:44 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Jan 23 19:18:59 2017 +0100

--
 .../api/checkpoint/CheckpointedAsynchronously.java   | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/083152c6/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
index 8ad5ad0..6fcc1d5 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/checkpoint/CheckpointedAsynchronously.java
@@ -36,6 +36,21 @@ import java.io.Serializable;
  * To be able to support asynchronous snapshots, the state returned by the
  * {@link #snapshotState(long, long)} method is typically a copy or shadow copy
  * of the actual state.
+ * @deprecated Please use {@link ListCheckpointed} and {@link 
CheckpointedFunction}.
+ *
+ * The shortcut replacement via {@link ListCheckpointed}:
+ * {@code
+ *  public class ExampleOperator implements ListCheckpointed<Integer> {
+ *
+ * public List<Integer> snapshotState(long checkpointId, long 
timestamp) throws Exception {
+ *  return Collections.singletonList(this.value);
+ * }
+ *
+ * public void restoreState(List<Integer> state) throws Exception {
+ * this.value = state.get(0);
+ * }
+ * }
  */
+@Deprecated
 @PublicEvolving
 public interface CheckpointedAsynchronously extends 
Checkpointed {}



[2/4] flink git commit: [hotfix] [streaming api] Minor cleanup in WindowedStream and AllWindowedStream

2017-01-23 Thread sewen
[hotfix] [streaming api] Minor cleanup in WindowedStream and AllWindowedStream


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/159f51b2
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/159f51b2
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/159f51b2

Branch: refs/heads/release-1.2
Commit: 159f51b23dfdf2b6ccba728dbbe4c517ca532dbc
Parents: 4697b97
Author: Stephan Ewen 
Authored: Mon Jan 23 14:55:48 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 23 19:16:03 2017 +0100

--
 .../streaming/api/datastream/AllWindowedStream.java | 12 ++--
 .../flink/streaming/api/datastream/WindowedStream.java  | 12 ++--
 2 files changed, 12 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/159f51b2/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
index 0f0e947..6c57391 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/AllWindowedStream.java
@@ -53,6 +53,8 @@ import 
org.apache.flink.streaming.runtime.operators.windowing.functions.Internal
 import org.apache.flink.streaming.runtime.streamrecord.StreamElementSerializer;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 
+import static org.apache.flink.util.Preconditions.checkArgument;
+
 /**
  * A {@code AllWindowedStream} represents a data stream where the stream of
  * elements is split into windows based on a
@@ -122,12 +124,10 @@ public class AllWindowedStream {
 */
@PublicEvolving
public AllWindowedStream allowedLateness(Time lateness) {
-   long millis = lateness.toMilliseconds();
-   if (millis < 0) {
-   throw new IllegalArgumentException("The allowed 
lateness cannot be negative.");
-   } else if (windowAssigner.isEventTime()) {
-   this.allowedLateness = millis;
-   }
+   final long millis = lateness.toMilliseconds();
+   checkArgument(millis >= 0, "The allowed lateness cannot be 
negative.");
+
+   this.allowedLateness = millis;
return this;
}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/159f51b2/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
index 2df3621..b20e67a 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
@@ -63,6 +63,8 @@ import 
org.apache.flink.streaming.runtime.operators.windowing.WindowOperator;
 import org.apache.flink.streaming.runtime.streamrecord.StreamElementSerializer;
 import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
 
+import static org.apache.flink.util.Preconditions.checkArgument;
+
 /**
  * A {@code WindowedStream} represents a data stream where elements are 
grouped by
  * key, and for each key, the stream of elements is split into windows based 
on a
@@ -140,12 +142,10 @@ public class WindowedStream {
 */
@PublicEvolving
public WindowedStream allowedLateness(Time lateness) {
-   long millis = lateness.toMilliseconds();
-   if (millis < 0) {
-   throw new IllegalArgumentException("The allowed 
lateness cannot be negative.");
-   } else if (windowAssigner.isEventTime()) {
-   this.allowedLateness = millis;
-   }
+   final long millis = lateness.toMilliseconds();
+   checkArgument(millis >= 0, "The allowed lateness cannot be 
negative.");
+
+   this.allowedLateness = millis;
return this;
}
 



[3/4] flink git commit: [FLINK-5718] [core] TaskManagers exit the JVM on fatal exceptions.

2017-02-10 Thread sewen
[FLINK-5718] [core] TaskManagers exit the JVM on fatal exceptions.

This closes #3276
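
A hedged sketch of the underlying idea (simplified, not the actual Task code):
once a task thread dies with a JVM-fatal error such as an OutOfMemoryError,
the process state can no longer be trusted, so the safest reaction is to halt
the JVM immediately instead of attempting recovery. Which errors count as
fatal is centralized in ExceptionUtils in the real change (per the diffstat
below); the instanceof check here is a simplified assumption:

    // wraps a task body and halts the JVM on fatal errors
    final class FatalErrorGuard implements Runnable {

        private final Runnable taskBody;

        FatalErrorGuard(Runnable taskBody) {
            this.taskBody = taskBody;
        }

        @Override
        public void run() {
            try {
                taskBody.run();
            } catch (Throwable t) {
                if (t instanceof OutOfMemoryError || t instanceof InternalError) {
                    // halt() exits immediately, skipping shutdown hooks and
                    // finalizers that might themselves fail or block
                    Runtime.getRuntime().halt(-1);
                }
                throw t;   // non-fatal errors take the normal failure path
            }
        }
    }

Whether an OutOfMemoryError in the task thread triggers this is a policy
decision; the commit makes it opt-in via taskmanager.jvm-exit-on-oom.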


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/dfc6fba5
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/dfc6fba5
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/dfc6fba5

Branch: refs/heads/master
Commit: dfc6fba5b9830e6a7804a6a0c9f69b36bf772730
Parents: 3bde6ff
Author: Stephan Ewen 
Authored: Mon Feb 6 15:52:39 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 10 16:28:31 2017 +0100

--
 docs/setup/config.md|   4 +-
 .../flink/configuration/TaskManagerOptions.java |   5 +
 .../org/apache/flink/util/ExceptionUtils.java   |  37 +++
 .../taskexecutor/TaskManagerConfiguration.java  |  19 +-
 .../apache/flink/runtime/taskmanager/Task.java  |  14 +
 .../taskmanager/TaskManagerRuntimeInfo.java |   8 +
 ...askManagerComponentsStartupShutdownTest.java |   3 +-
 .../flink/runtime/testutils/TestJvmProcess.java |   9 +
 .../runtime/util/JvmExitOnFatalErrorTest.java   | 259 +++
 .../util/TestingTaskManagerRuntimeInfo.java |   6 +
 .../flink/core/testutils/CommonTestUtils.java   |  25 ++
 11 files changed, 385 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/dfc6fba5/docs/setup/config.md
--
diff --git a/docs/setup/config.md b/docs/setup/config.md
index 2accdc2..b86c534 100644
--- a/docs/setup/config.md
+++ b/docs/setup/config.md
@@ -86,7 +86,7 @@ The default fraction for managed memory can be adjusted using 
the `taskmanager.m
 
 - `taskmanager.memory.segment-size`: The size of memory buffers used by the 
memory manager and the network stack in bytes (DEFAULT: 32768 (= 32 KiBytes)).
 
-- `taskmanager.memory.preallocate`: Can be either of `true` or `false`. 
Specifies whether task managers should allocate all managed memory when 
starting up. (DEFAULT: false). When `taskmanager.memory.off-heap` is set to 
`true`, then it is advised that this configuration is also set to `true`.  If 
this configuration is set to `false` cleaning up of the allocated offheap 
memory happens only when the configured JVM parameter MaxDirectMemorySize is 
reached by triggering a full GC.
+- `taskmanager.memory.preallocate`: Can be either of `true` or `false`. 
Specifies whether task managers should allocate all managed memory when 
starting up. (DEFAULT: false). When `taskmanager.memory.off-heap` is set to 
`true`, then it is advised that this configuration is also set to `true`.  If 
this configuration is set to `false` cleaning up of the allocated offheap 
memory happens only when the configured JVM parameter MaxDirectMemorySize is 
reached by triggering a full GC. **Note:** For streaming setups, we highly 
recommend setting this value to `false`, as the core state backends currently do 
not use the managed memory.
 
 ### Memory and Performance Debugging
 
@@ -265,6 +265,8 @@ The following parameters configure Flink's JobManager and 
TaskManagers.
 
 - `taskmanager.refused-registration-pause`: The pause after a registration has 
been refused by the job manager before retrying to connect. The refused 
registration pause requires a time unit specifier (ms/s/min/h/d) (e.g. "5 s"). 
(DEFAULT: **10 s**)
 
+- `taskmanager.jvm-exit-on-oom`: Indicates that the TaskManager should 
immediately terminate the JVM if the task thread throws an `OutOfMemoryError` 
(DEFAULT: **false**).
+
 - `blob.fetch.retries`: The number of retries for the TaskManager to download 
BLOBs (such as JAR files) from the JobManager (DEFAULT: **50**).
 
 - `blob.fetch.num-concurrent`: The number concurrent BLOB fetches (such as JAR 
file downloads) that the JobManager serves (DEFAULT: **50**).

http://git-wip-us.apache.org/repos/asf/flink/blob/dfc6fba5/flink-core/src/main/java/org/apache/flink/configuration/TaskManagerOptions.java
--
diff --git 
a/flink-core/src/main/java/org/apache/flink/configuration/TaskManagerOptions.java
 
b/flink-core/src/main/java/org/apache/flink/configuration/TaskManagerOptions.java
index 6f6238b..b7ee20a 100644
--- 
a/flink-core/src/main/java/org/apache/flink/configuration/TaskManagerOptions.java
+++ 
b/flink-core/src/main/java/org/apache/flink/configuration/TaskManagerOptions.java
@@ -33,6 +33,11 @@ public class TaskManagerOptions {
// 

 
// @TODO Migrate 'taskmanager.*' config options from ConfigConstants
+   
+   /** Whether to kill the TaskManager when the task thread throws an 
OutOfMemoryError */
+   public static final ConfigOption KILL_ON_OUT_OF_MEMORY =
+   

[4/4] flink git commit: [hotfix] [dist] Add notice about memory pre-allocation to default 'flink-conf.yaml'

2017-02-10 Thread sewen
[hotfix] [dist] Add notice about memory pre-allocation to default 
'flink-conf.yaml'


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/e29dfb84
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/e29dfb84
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/e29dfb84

Branch: refs/heads/master
Commit: e29dfb840495e0c1fd01e87a4af1abbf98103fa4
Parents: dfc6fba
Author: Stephan Ewen 
Authored: Fri Feb 10 14:53:58 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 10 16:28:31 2017 +0100

--
 flink-dist/src/main/resources/flink-conf.yaml | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/e29dfb84/flink-dist/src/main/resources/flink-conf.yaml
--
diff --git a/flink-dist/src/main/resources/flink-conf.yaml 
b/flink-dist/src/main/resources/flink-conf.yaml
index 0f30595..72acbeb 100644
--- a/flink-dist/src/main/resources/flink-conf.yaml
+++ b/flink-dist/src/main/resources/flink-conf.yaml
@@ -53,6 +53,8 @@ taskmanager.numberOfTaskSlots: 1
 
 # Specify whether TaskManager memory should be allocated when starting up 
(true) or when
 # memory is required in the memory manager (false)
+# Important Note: For pure streaming setups, we highly recommend setting this 
value to `false`
+# as the default state backends currently do not use the managed memory.
 
 taskmanager.memory.preallocate: false
 



[2/4] flink git commit: [FLINK-5759] [jobmanager] Set UncaughtExceptionHandlers for JobManager's Future and I/O thread pools

2017-02-10 Thread sewen
[FLINK-5759] [jobmanager] Set UncaughtExceptionHandlers for JobManager's Future 
and I/O thread pools

This closes #3290
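
A hedged sketch of the core of such a thread factory (simplified; judging by
the diff below, the real ExecutorThreadFactory also standardizes thread naming
and daemon flags): every pooled thread gets an UncaughtExceptionHandler, so an
Error thrown inside a Future or I/O thread is surfaced instead of silently
killing the thread:

    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.atomic.AtomicInteger;

    final class GuardedThreadFactory implements ThreadFactory {

        private final String poolName;
        private final AtomicInteger counter = new AtomicInteger(1);

        GuardedThreadFactory(String poolName) {
            this.poolName = poolName;
        }

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, poolName + "-thread-" + counter.getAndIncrement());
            t.setDaemon(true);
            // without a handler, uncaught throwables in pool threads can vanish
            t.setUncaughtExceptionHandler((thread, e) ->
                    System.err.println("FATAL: thread '" + thread.getName() + "' died: " + e));
            return t;
        }
    }

In the JobManager's case the handler escalates to a fatal-error routine rather
than only logging; the sketch just shows where the hook attaches.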


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/ef77c254
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/ef77c254
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/ef77c254

Branch: refs/heads/master
Commit: ef77c254dadbe4c04810681fe765f5ec7d2a7400
Parents: 6630513
Author: Stephan Ewen 
Authored: Thu Feb 9 14:04:17 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 10 16:28:30 2017 +0100

--
 .../MesosApplicationMasterRunner.java   |  10 +-
 .../flink/runtime/filecache/FileCache.java  |   3 +-
 .../runtime/jobmaster/JobManagerServices.java   |   6 +-
 .../runtime/util/ExecutorThreadFactory.java | 123 ++-
 .../flink/runtime/util/NamedThreadFactory.java  |  58 -
 .../flink/runtime/jobmanager/JobManager.scala   |   4 +-
 .../runtime/minicluster/FlinkMiniCluster.scala  |  10 +-
 .../flink/yarn/YarnApplicationMasterRunner.java |   8 +-
 8 files changed, 119 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/ef77c254/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
--
diff --git 
a/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
 
b/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
index 5033692..a23c9f6 100644
--- 
a/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
+++ 
b/flink-mesos/src/main/java/org/apache/flink/mesos/runtime/clusterframework/MesosApplicationMasterRunner.java
@@ -22,10 +22,12 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Address;
 import akka.actor.Props;
+
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.PosixParser;
+
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.configuration.GlobalConfiguration;
@@ -52,15 +54,17 @@ import 
org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService;
 import org.apache.flink.runtime.process.ProcessReaper;
 import org.apache.flink.runtime.security.SecurityUtils;
 import org.apache.flink.runtime.util.EnvironmentInformation;
+import org.apache.flink.runtime.util.ExecutorThreadFactory;
 import org.apache.flink.runtime.util.Hardware;
 import org.apache.flink.runtime.util.JvmShutdownSafeguard;
 import org.apache.flink.runtime.util.LeaderRetrievalUtils;
-import org.apache.flink.runtime.util.NamedThreadFactory;
 import org.apache.flink.runtime.util.SignalHandler;
 import org.apache.flink.runtime.webmonitor.WebMonitor;
 import org.apache.mesos.Protos;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import scala.concurrent.duration.Duration;
 import scala.concurrent.duration.FiniteDuration;
 
@@ -216,11 +220,11 @@ public class MesosApplicationMasterRunner {
 
futureExecutor = Executors.newScheduledThreadPool(
numberProcessors,
-   new 
NamedThreadFactory("mesos-jobmanager-future-", "-thread-"));
+   new 
ExecutorThreadFactory("mesos-jobmanager-future"));
 
ioExecutor = Executors.newFixedThreadPool(
numberProcessors,
-   new NamedThreadFactory("mesos-jobmanager-io-", 
"-thread-"));
+   new 
ExecutorThreadFactory("mesos-jobmanager-io"));
 
mesosServices = 
MesosServicesUtils.createMesosServices(config);
 

http://git-wip-us.apache.org/repos/asf/flink/blob/ef77c254/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java 
b/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java
index 21456de..4f2166f 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/filecache/FileCache.java
@@ -99,7 +99,8 @@ public class FileCache {
this.shutdownHook = createShutdownHook(this, LOG);
 
this.entries = new HashMap>>();
-   

[1/4] flink git commit: [FLINK-5766] [distributed coordination] Unify the handling of NoResourceAvailableException

2017-02-10 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 66305135b -> e29dfb840


[FLINK-5766] [distributed coordination] Unify the handling of 
NoResourceAvailableException


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/3bde6ffb
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/3bde6ffb
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/3bde6ffb

Branch: refs/heads/master
Commit: 3bde6ffb6f55ec7ff807633ab1e79d9238e5a942
Parents: ef77c25
Author: Stephan Ewen 
Authored: Thu Feb 9 19:12:32 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Feb 10 16:28:30 2017 +0100

--
 .../flink/runtime/executiongraph/Execution.java |  4 +-
 .../executiongraph/ExecutionJobVertex.java  |  4 +-
 .../runtime/executiongraph/ExecutionVertex.java |  3 +-
 .../flink/runtime/instance/SlotProvider.java|  5 +-
 .../runtime/jobmanager/scheduler/Scheduler.java | 28 ++-
 .../ScheduleWithCoLocationHintTest.java | 16 ---
 .../scheduler/SchedulerIsolatedTasksTest.java   | 11 +++--
 .../scheduler/SchedulerSlotSharingTest.java | 49 ++--
 8 files changed, 63 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/3bde6ffb/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
index e29e5b6..60e5575 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
@@ -40,7 +40,6 @@ import org.apache.flink.runtime.instance.SlotProvider;
 import org.apache.flink.runtime.io.network.ConnectionID;
 import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
 import org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint;
-import 
org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException;
 import org.apache.flink.runtime.jobmanager.scheduler.ScheduledUnit;
 import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
 import org.apache.flink.runtime.jobmanager.slots.TaskManagerGateway;
@@ -248,9 +247,8 @@ public class Execution implements AccessExecution, 
Archiveable

http://git-wip-us.apache.org/repos/asf/flink/blob/3bde6ffb/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
index e8664f7..3828fc9 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
@@ -39,11 +39,11 @@ import org.apache.flink.runtime.jobgraph.JobVertex;
 import org.apache.flink.runtime.jobgraph.JobVertexID;
 import org.apache.flink.runtime.jobmanager.JobManagerOptions;
 import org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroup;
-import 
org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException;
 import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
 import org.apache.flink.runtime.state.KeyGroupRangeAssignment;
 import org.apache.flink.util.Preconditions;
 import org.apache.flink.util.SerializedValue;
+
 import org.slf4j.Logger;
 
 import java.io.IOException;
@@ -386,7 +386,7 @@ public class ExecutionJobVertex implements 
AccessExecutionJobVertex, Archiveable
//  Actions

//-

-   public void scheduleAll(SlotProvider slotProvider, boolean queued) 
throws NoResourceAvailableException {
+   public void scheduleAll(SlotProvider slotProvider, boolean queued) {

ExecutionVertex[] vertices = this.taskVertices;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/3bde6ffb/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
index cb2e177..92327fd 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionVertex.java
+++ 

[01/10] flink git commit: [hotfix] [docs] Move 'dev/state_backends' to 'ops/state_backends'

2017-01-16 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.2 33c5df6dd -> f4869a66d


[hotfix] [docs] Move 'dev/state_backends' to 'ops/state_backends'


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/daad28ab
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/daad28ab
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/daad28ab

Branch: refs/heads/release-1.2
Commit: daad28ab5431ba1ab280a2024b5d28b70b0713ee
Parents: 33c5df6
Author: Stephan Ewen 
Authored: Mon Jan 9 19:39:44 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:52:41 2017 +0100

--
 docs/dev/state.md|   4 +-
 docs/dev/state_backends.md   | 148 --
 docs/ops/README.md   |  21 +
 docs/ops/state_backends.md   | 148 ++
 docs/redirects/state_backends.md |   2 +-
 docs/setup/aws.md|   4 +-
 docs/setup/savepoints.md |   2 +-
 7 files changed, 175 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/daad28ab/docs/dev/state.md
--
diff --git a/docs/dev/state.md b/docs/dev/state.md
index 73a4ceb..4478bfc 100644
--- a/docs/dev/state.md
+++ b/docs/dev/state.md
@@ -40,7 +40,7 @@ Flink's state interface.
 By default state checkpoints will be stored in-memory at the JobManager. For 
proper persistence of large
 state, Flink supports storing the checkpoints on file systems (HDFS, S3, or 
any mounted POSIX file system),
 which can be configured in the `flink-conf.yaml` or via 
`StreamExecutionEnvironment.setStateBackend(…)`.
-See [state backends]({{ site.baseurl }}/dev/state_backends.html) for 
information
+See [state backends]({{ site.baseurl }}/ops/state_backends.html) for 
information
 about the available state backends and how to configure them.
 
 * ToC
@@ -52,7 +52,7 @@ Enabling Checkpointing
 Flink has a checkpointing mechanism that recovers streaming jobs after 
failures. The checkpointing mechanism requires a *persistent* (or *durable*) 
source that
 can be asked for prior records again (Apache Kafka is a good example of such a 
source).
 
-The checkpointing mechanism stores the progress in the data sources and data 
sinks, the state of windows, as well as the user-defined state (see [Working 
with State]({{ site.baseurl }}/dev/state.html)) consistently to provide 
*exactly once* processing semantics. Where the checkpoints are stored (e.g., 
JobManager memory, file system, database) depends on the configured [state 
backend]({{ site.baseurl }}/dev/state_backends.html).
+The checkpointing mechanism stores the progress in the data sources and data 
sinks, the state of windows, as well as the user-defined state (see [Working 
with State]({{ site.baseurl }}/dev/state.html)) consistently to provide 
*exactly once* processing semantics. Where the checkpoints are stored (e.g., 
JobManager memory, file system, database) depends on the configured [state 
backend]({{ site.baseurl }}/ops/state_backends.html).
 
 The [docs on streaming fault tolerance]({{ site.baseurl 
}}/internals/stream_checkpointing.html) describe in detail the technique behind 
Flink's streaming fault tolerance mechanism.
 

http://git-wip-us.apache.org/repos/asf/flink/blob/daad28ab/docs/dev/state_backends.md
--
diff --git a/docs/dev/state_backends.md b/docs/dev/state_backends.md
deleted file mode 100644
index af9934d..000
--- a/docs/dev/state_backends.md
+++ /dev/null
@@ -1,148 +0,0 @@

-title: "State Backends"
-nav-parent_id: setup
-nav-pos: 11

-
-
-Programs written in the [Data Stream API]({{ site.baseurl 
}}/dev/datastream_api.html) often hold state in various forms:
-
-- Windows gather elements or aggregates until they are triggered
-- Transformation functions may use the key/value state interface to store 
values
-- Transformation functions may implement the `Checkpointed` interface to make 
their local variables fault tolerant
-
-See also [Working with State]({{ site.baseurl }}/dev/state.html) in the 
streaming API guide.
-
-When checkpointing is activated, such state is persisted upon checkpoints to 
guard against data loss and recover consistently.
-How the state is represented internally, and how and where it is persisted 
upon checkpoints depends on the
-chosen **State Backend**.
-
-* ToC
-{:toc}
-
-## Available State Backends
-
-Out of the box, Flink bundles these state backends:
-
- - *MemoryStateBackend*
- - *FsStateBackend*
- - *RocksDBStateBackend*
-
-If nothing else is configured, the system will use the MemoryStateBackend.
-
-
-### The MemoryStateBackend
-
-The *MemoryStateBackend* holds data internally as 
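
As a hedged illustration of the configuration the moved page describes,
selecting a state backend in the Flink 1.2-era API looks roughly like this
(host name and checkpoint path are placeholders):

    import org.apache.flink.runtime.state.filesystem.FsStateBackend;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    public class StateBackendExample {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env =
                    StreamExecutionEnvironment.getExecutionEnvironment();

            // persist checkpoints on a (distributed) file system instead of
            // keeping them in JobManager memory
            env.setStateBackend(new FsStateBackend("hdfs://namenode:40010/flink/checkpoints"));
        }
    }

    // The equivalent flink-conf.yaml entries:
    //   state.backend: filesystem
    //   state.backend.fs.checkpointdir: hdfs://namenode:40010/flink/checkpoints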

[05/10] flink git commit: [FLINK-5456] [docs] Add stub for types of state and state interfaces

2017-01-16 Thread sewen
[FLINK-5456] [docs] Add stub for types of state and state interfaces


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/58509531
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/58509531
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/58509531

Branch: refs/heads/release-1.2
Commit: 585095312a59fee953d6b370db0a939a8392dd19
Parents: ac193d6
Author: Stephan Ewen 
Authored: Tue Jan 10 12:31:21 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:53:24 2017 +0100

--
 docs/dev/state.md| 362 --
 docs/dev/stream/checkpointing.md | 152 ++
 docs/dev/stream/state.md |  78 
 docs/internals/state_backends.md |  71 ---
 4 files changed, 230 insertions(+), 433 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/58509531/docs/dev/state.md
--
diff --git a/docs/dev/state.md b/docs/dev/state.md
deleted file mode 100644
index 4478bfc..000
--- a/docs/dev/state.md
+++ /dev/null
@@ -1,362 +0,0 @@

-title: "State & Checkpointing"
-nav-parent_id: streaming
-nav-id: state
-nav-pos: 40

-
-
-All transformations in Flink may look like functions (in the functional 
processing terminology), but
-are in fact stateful operators. You can make *every* transformation (`map`, 
`filter`, etc) stateful
-by using Flink's state interface or checkpointing instance fields of your 
function. You can register
-any instance field
-as ***managed*** state by implementing an interface. In this case, and also in 
the case of using
-Flink's native state interface, Flink will automatically take consistent 
snapshots of your state
-periodically, and restore its value in the case of a failure.
-
-The end effect is that updates to any form of state are the same under 
failure-free execution and
-execution under failures.
-
-First, we look at how to make instance fields consistent under failures, and 
then we look at
-Flink's state interface.
-
-By default state checkpoints will be stored in-memory at the JobManager. For 
proper persistence of large
-state, Flink supports storing the checkpoints on file systems (HDFS, S3, or 
any mounted POSIX file system),
-which can be configured in the `flink-conf.yaml` or via 
`StreamExecutionEnvironment.setStateBackend(…)`.
-See [state backends]({{ site.baseurl }}/ops/state_backends.html) for 
information
-about the available state backends and how to configure them.
-
-* ToC
-{:toc}
-
-Enabling Checkpointing
--
-
-Flink has a checkpointing mechanism that recovers streaming jobs after 
failures. The checkpointing mechanism requires a *persistent* (or *durable*) 
source that
-can be asked for prior records again (Apache Kafka is a good example of such a 
source).
-
-The checkpointing mechanism stores the progress in the data sources and data 
sinks, the state of windows, as well as the user-defined state (see [Working 
with State]({{ site.baseurl }}/dev/state.html)) consistently to provide 
*exactly once* processing semantics. Where the checkpoints are stored (e.g., 
JobManager memory, file system, database) depends on the configured [state 
backend]({{ site.baseurl }}/ops/state_backends.html).
-
-The [docs on streaming fault tolerance]({{ site.baseurl 
}}/internals/stream_checkpointing.html) describe in detail the technique behind 
Flink's streaming fault tolerance mechanism.
-
-By default, checkpointing is disabled. To enable checkpointing, call 
`enableCheckpointing(n)` on the `StreamExecutionEnvironment`, where *n* is the 
checkpoint interval in milliseconds.
-
-Other parameters for checkpointing include:
-
-- *exactly-once vs. at-least-once*: You can optionally pass a mode to the 
`enableCheckpointing(n)` method to choose between the two guarantee levels.
-  Exactly-once is preferrable for most applications. At-least-once may be 
relevant for certain super-low-latency (consistently few milliseconds) 
applications.
-
-- *number of concurrent checkpoints*: By default, the system will not trigger 
another checkpoint while one is still in progress. This ensures that the 
topology does not spend too much time on checkpoints and not make progress with 
processing the streams. It is possible to allow for multiple overlapping 
checkpoints, which is interesting for pipelines that have a certain processing 
delay (for example because the functions call external services that need some 
time to respond) but that still want to do very frequent checkpoints (100s of 
milliseconds) to re-process very little upon failures.
-
-- *checkpoint timeout*: The time after which a checkpoint-in-progress is 
aborted, if it did not complete by then.
-
-
-
-{% highlight java %}
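
A hedged sketch of the checkpoint settings the bullets above describe, using
standard Flink 1.2 APIs (the page's own sample is not preserved here, so treat
this as an approximation of it):

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // take a checkpoint every second
    env.enableCheckpointing(1000);

    // exactly-once vs. at-least-once guarantee level
    env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

    // abort a checkpoint-in-progress after one minute
    env.getCheckpointConfig().setCheckpointTimeout(60000);

    // do not allow concurrent (overlapping) checkpoints
    env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

(assumes imports of org.apache.flink.streaming.api.CheckpointingMode and
org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)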

[08/10] flink git commit: [FLINK-5459] [docs] Add templates for debugging classloading and debugging event time issues

2017-01-16 Thread sewen
[FLINK-5459] [docs] Add templates for debugging classloading and debugging 
event time issues


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/ef185d77
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/ef185d77
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/ef185d77

Branch: refs/heads/release-1.2
Commit: ef185d777d6c5debf139833b6af88e9cbfa583bf
Parents: 4b0c4d9
Author: Stephan Ewen 
Authored: Tue Jan 10 22:56:16 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:53:55 2017 +0100

--
 docs/monitoring/debugging_classloading.md | 45 ++
 docs/monitoring/debugging_event_time.md   | 37 +
 2 files changed, 82 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/ef185d77/docs/monitoring/debugging_classloading.md
--
diff --git a/docs/monitoring/debugging_classloading.md 
b/docs/monitoring/debugging_classloading.md
new file mode 100644
index 000..e4e908e
--- /dev/null
+++ b/docs/monitoring/debugging_classloading.md
@@ -0,0 +1,45 @@
+---
+title: "Debugging Classloading"
+nav-parent_id: monitoring
+nav-pos: 8
+---
+
+
+* ToC
+{:toc}
+
+## Overview of Classloading in Flink
+
+  - What is in the Application Classloader for different deployment techs
+  - What is in the user code classloader
+
+  - Access to the user code classloader for applications
+
+## Classpath Setups
+
+  - Finding classpaths in logs
+  - Moving libraries and/or user code to the Application Classpath 
+
+## Unloading of Dynamically Loaded Classes
+
+  - Checkpoint statistics overview
+  - Interpret time until checkpoints
+  - Synchronous vs. asynchronous checkpoint time
+

http://git-wip-us.apache.org/repos/asf/flink/blob/ef185d77/docs/monitoring/debugging_event_time.md
--
diff --git a/docs/monitoring/debugging_event_time.md 
b/docs/monitoring/debugging_event_time.md
new file mode 100644
index 000..e87db0a
--- /dev/null
+++ b/docs/monitoring/debugging_event_time.md
@@ -0,0 +1,37 @@
+---
+title: "Debugging Windows & Event Time"
+nav-parent_id: monitoring
+nav-pos: 9
+---
+
+
+* ToC
+{:toc}
+
+## Monitoring Current Event Time
+
+  - What metrics, how to access in the UI
+  - Event time dominated by furthest-behind source
+
+## Handling Event Time Stragglers
+
+  - Approach 1: Watermark stays late (indicates completeness), windows fire 
early
+  - Approach 2: Watermark heuristic with maximum lateness, windows accept late 
data
+



[06/10] flink git commit: [FLINK-5457] [docs] Add stub for Async I/O docs

2017-01-16 Thread sewen
[FLINK-5457] [docs] Add stub for Async I/O docs


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2730e895
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2730e895
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2730e895

Branch: refs/heads/release-1.2
Commit: 2730e895fc87970f509ff8297222d6791fd31fdf
Parents: 5850953
Author: Stephan Ewen 
Authored: Tue Jan 10 12:36:51 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:53:36 2017 +0100

--
 docs/dev/stream/asyncio.md | 28 
 1 file changed, 28 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/2730e895/docs/dev/stream/asyncio.md
--
diff --git a/docs/dev/stream/asyncio.md b/docs/dev/stream/asyncio.md
new file mode 100644
index 000..2d0867a
--- /dev/null
+++ b/docs/dev/stream/asyncio.md
@@ -0,0 +1,28 @@
+---
+title: "Async I/O for External Data Access"
+nav-parent_id: streaming
+nav-pos: 60
+---
+
+
+* ToC
+{:toc}
+
+**TBD**



[07/10] flink git commit: [FLINK-5458] [docs] Add a template for migration guide

2017-01-16 Thread sewen
[FLINK-5458] [docs] Add a template for migration guide


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4b0c4d96
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4b0c4d96
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4b0c4d96

Branch: refs/heads/release-1.2
Commit: 4b0c4d969e53caa986a47465c3600484dbd14e5e
Parents: 2730e89
Author: Stephan Ewen 
Authored: Tue Jan 10 15:44:42 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:53:46 2017 +0100

--
 docs/dev/migration.md | 33 +
 docs/index.md | 11 ++-
 2 files changed, 43 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/4b0c4d96/docs/dev/migration.md
--
diff --git a/docs/dev/migration.md b/docs/dev/migration.md
new file mode 100644
index 000..c74952c
--- /dev/null
+++ b/docs/dev/migration.md
@@ -0,0 +1,33 @@
+---
+title: "API Migration Guides"
+nav-parent_id: dev
+nav-pos: 100
+---
+
+
+* This will be replaced by the TOC
+{:toc}
+
+## Flink 1.1 to 1.2
+
+### State API
+
+### Fast Processing Time Window Operators
+

http://git-wip-us.apache.org/repos/asf/flink/blob/4b0c4d96/docs/index.md
--
diff --git a/docs/index.md b/docs/index.md
index 75b5328..595a094 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -29,8 +29,17 @@ Apache Flink is an open source platform for distributed 
stream and batch data pr
 
 ## First Steps
 
-- **Concepts**: Start with the basic concepts of Flink's [Dataflow Programming 
Model]({{ site.baseurl }}/concepts/programming-model.html) and [Distributed 
Runtime Environment]({{ site.baseurl }}/concepts/runtime.html). This will help 
you to fully understand the other parts of the documentation, including the 
setup and programming guides. It is highly recommended to read these sections 
first.
+- **Concepts**: Start with the basic concepts of Flink's [Dataflow Programming 
Model](concepts/programming-model.html) and [Distributed Runtime 
Environment](concepts/runtime.html). This will help you to fully understand the 
other parts of the documentation, including the setup and programming guides. 
It is highly recommended to read these sections first.
 
 - **Quickstarts**: [Run an example program](quickstart/setup_quickstart.html) 
on your local machine or [write a simple 
program](quickstart/run_example_quickstart.html) working on live Wikipedia 
edits.
 
 - **Programming Guides**: You can check out our guides about [basic 
concepts](dev/api_concepts.html) and the [DataStream 
API](dev/datastream_api.html) or [DataSet API](dev/batch/index.html) to learn 
how to write your first Flink programs.
+
+## Migration Guide
+
+For users that have used prior versions of Apache Flink, we recommend checking 
out the [API migration guide](dev/migration.html).
+While all parts of the API that were marked as public and stable are still 
supported (backwards compatible), we suggest migrating applications to the
+newer interfaces where applicable.
+
+For users looking to upgrade an operational Flink system to the latest 
version, we recommend checking out the guide on [upgrading Apache 
Flink](ops/upgrading.html).
+



[10/10] flink git commit: [FLINK-5457] [docs] Add documentation for asynchronous I/O

2017-01-16 Thread sewen
[FLINK-5457] [docs] Add documentation for asynchronous I/O


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/f4869a66
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/f4869a66
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/f4869a66

Branch: refs/heads/release-1.2
Commit: f4869a66d10468b804908dfe2564154cedc9aaa6
Parents: b41d0ff
Author: Stephan Ewen 
Authored: Sun Jan 15 19:41:36 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:54:18 2017 +0100

--
 .gitignore |   1 +
 docs/dev/stream/asyncio.md | 226 ++-
 docs/fig/async_io.svg  | 337 
 3 files changed, 562 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/f4869a66/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 1b9c64e..9012d0a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,5 +29,6 @@ out/
 /docs/.rubydeps
 /docs/ruby2/.bundle
 /docs/ruby2/.rubydeps
+/docs/.jekyll-metadata
 *.ipr
 *.iws

http://git-wip-us.apache.org/repos/asf/flink/blob/f4869a66/docs/dev/stream/asyncio.md
--
diff --git a/docs/dev/stream/asyncio.md b/docs/dev/stream/asyncio.md
index 2d0867a..abc0b24 100644
--- a/docs/dev/stream/asyncio.md
+++ b/docs/dev/stream/asyncio.md
@@ -1,5 +1,6 @@
 ---
-title: "Async I/O for External Data Access"
+title: "Asynchronous I/O for External Data Access"
+nav-title: "Async I/O"
 nav-parent_id: streaming
 nav-pos: 60
 ---
@@ -25,4 +26,225 @@ under the License.
 * ToC
 {:toc}
 
-**TBD**
+This page explains the use of Flink's API for asynchronous I/O with external 
data stores.
+For users not familiar with asynchronous or event-driven programming, an 
article about Futures and
+event-driven programming may be useful preparation.
+
+Note: Details about the design and implementation of the asynchronous I/O 
utility can be found in the proposal and design document
+[FLIP-12: Asynchronous I/O Design and 
Implementation](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=65870673).
+
+
+## The need for Asynchronous I/O Operations
+
+When interacting with external systems (for example when enriching stream 
events with data stored in a database), one needs to take care
+that communication delay with the external system does not dominate the 
streaming application's total work.
+
+Naively accessing data in the external database, for example in a 
`MapFunction`, typically means **synchronous** interaction:
+A request is sent to the database and the `MapFunction` waits until the 
response has been received. In many cases, this waiting
+makes up the vast majority of the function's time.
+
+Asynchronous interaction with the database means that a single parallel 
function instance can handle many requests concurrently and
+receive the responses concurrently. That way, the waiting time can be 
overlaid with sending other requests and
+receiving responses. At the very least, the waiting time is amortized over 
multiple requests. This in most cases leads to much higher
+streaming throughput.
+
+
+
+*Note:* Improving throughput by just scaling the `MapFunction` to a very high 
parallelism is in some cases possible as well, but usually
+comes at a very high resource cost: Having many more parallel MapFunction 
instances means more tasks, threads, Flink-internal network
+connections, network connections to the database, buffers, and general 
internal bookkeeping overhead.
+
+
+## Prerequisites
+
+As illustrated in the section above, implementing proper asynchronous I/O to a 
database (or key/value store) requires a client
+to that database that supports asynchronous requests. Many popular databases 
offer such a client.
+
+In the absence of such a client, one can try and turn a synchronous client 
into a limited concurrent client by creating
+multiple clients and handling the synchronous calls with a thread pool. 
However, this approach is usually less
+efficient than a proper asynchronous client.
+
+
+## Async I/O API
+
+Flink's Async I/O API allows users to use asynchronous request clients with 
data streams. The API handles the integration with
+data streams, as well as handling order, event time, fault tolerance, etc.
+
+Assuming one has an asynchronous client for the target database, three parts 
are needed to implement a stream transformation
+with asynchronous I/O against the database:
+
+  - An implementation of `AsyncFunction` that dispatches the requests
+  - A *callback* that takes the result of the operation and hands it to the 
`AsyncCollector`
+  - Applying the async I/O operation on a DataStream as a transformation
+
+The 
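
A hedged sketch of the three parts listed above, against the 1.2-era async I/O
API; the asynchronous database client (DatabaseClient, queryAsync) is a
hypothetical stand-in, and the exact signatures should be checked against the
release:

    import java.util.Collections;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;

    import org.apache.flink.streaming.api.datastream.AsyncDataStream;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.functions.async.AsyncFunction;
    import org.apache.flink.streaming.api.functions.async.collector.AsyncCollector;

    // hypothetical stand-in for a database client with an async query method
    interface DatabaseClient {
        CompletableFuture<String> queryAsync(String key);
    }

    // part 1: an AsyncFunction that dispatches the requests
    class AsyncDatabaseRequest implements AsyncFunction<String, String> {

        private transient DatabaseClient client;

        @Override
        public void asyncInvoke(String key, AsyncCollector<String> collector) {
            // part 2: a callback hands the result to the AsyncCollector
            client.queryAsync(key).thenAccept(value ->
                    collector.collect(Collections.singletonList(value)));
        }
    }

    // part 3: applying the async operation on a DataStream as a transformation
    // DataStream<String> result = AsyncDataStream.unorderedWait(
    //         keys, new AsyncDatabaseRequest(), 1000, TimeUnit.MILLISECONDS, 100);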

[09/10] flink git commit: [FLINK-5460] [docs] Add placeholder for Docker setup guide

2017-01-16 Thread sewen
[FLINK-5460] [docs] Add placeholder for Docker setup guide


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/b41d0ff8
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/b41d0ff8
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/b41d0ff8

Branch: refs/heads/release-1.2
Commit: b41d0ff8ffa6286a6040bae44ccaf2d4082708c5
Parents: ef185d7
Author: Stephan Ewen 
Authored: Tue Jan 10 23:04:52 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:54:04 2017 +0100

--
 docs/setup/aws.md|  2 +-
 docs/setup/deployment.md |  2 +-
 docs/setup/docker.md | 29 +
 docs/setup/gce_setup.md  |  2 +-
 4 files changed, 32 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/b41d0ff8/docs/setup/aws.md
--
diff --git a/docs/setup/aws.md b/docs/setup/aws.md
index d165955..8d04d59 100644
--- a/docs/setup/aws.md
+++ b/docs/setup/aws.md
@@ -2,7 +2,7 @@
 title: "Amazon Web Services (AWS)"
 nav-title: AWS
 nav-parent_id: deployment
-nav-pos: 4
+nav-pos: 10
 ---
 
+
+* This will be replaced by the TOC
+{:toc}
+
+

http://git-wip-us.apache.org/repos/asf/flink/blob/b41d0ff8/docs/setup/gce_setup.md
--
diff --git a/docs/setup/gce_setup.md b/docs/setup/gce_setup.md
index de38ce4..f9edfcc 100644
--- a/docs/setup/gce_setup.md
+++ b/docs/setup/gce_setup.md
@@ -2,7 +2,7 @@
 title:  "Google Compute Engine Setup"
 nav-title: Google Compute Engine
 nav-parent_id: deployment
-nav-pos: 5
+nav-pos: 20
 ---
 

[04/10] flink git commit: [hotfix] [docs] Move section about internal snapshot implementation from 'state_backends.md' to 'stream_checkpointing.md'

2017-01-16 Thread sewen
[hotfix] [docs] Move section about internal snapshot implementation from 
'state_backends.md' to 'stream_checkpointing.md'


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/ac193d6a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/ac193d6a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/ac193d6a

Branch: refs/heads/release-1.2
Commit: ac193d6a94ddb7e0fb0b86879ae25d979be11496
Parents: a562e3d
Author: Stephan Ewen 
Authored: Tue Jan 10 09:55:25 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:53:14 2017 +0100

--
 docs/internals/state_backends.md   | 12 
 docs/internals/stream_checkpointing.md | 14 +-
 2 files changed, 13 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/ac193d6a/docs/internals/state_backends.md
--
diff --git a/docs/internals/state_backends.md b/docs/internals/state_backends.md
index f6a4cc7..11d46ed 100644
--- a/docs/internals/state_backends.md
+++ b/docs/internals/state_backends.md
@@ -69,15 +69,3 @@ Examples are "ValueState", "ListState", etc. Flink's runtime 
encodes the states
 *Raw State* is state that users and operators keep in their own data 
structures. When checkpointed, they only write a sequence of bytes into
 the checkpoint. Flink knows nothing about the state's data structures and sees 
only the raw bytes.
 
-
-## Checkpointing Procedure
-
-When operator snapshots are taken, there are two parts: the **synchronous** 
and the **asynchronous** parts.
-
-Operators and state backends provide their snapshots as a Java `FutureTask`. 
That task contains the state where the *synchronous* part
-is completed and the *asynchronous* part is pending. The asynchronous part is 
then executed by a background thread for that checkpoint.
-
-Operators that checkpoint purely synchronously return an already completed 
`FutureTask`.
-If an asynchronous operation needs to be performed, it is executed in the 
`run()` method of that `FutureTask`.
-
-The tasks are cancelable, in order to release streams and other resource 
consuming handles.

For illustration, here is a minimal sketch of the `FutureTask` pattern described in the section above. The `StateView` and `StateHandle` types are hypothetical placeholders, not Flink's actual snapshot interfaces:

```java
import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;

// Hypothetical stand-ins for an immutable view of the operator state and a
// handle to the written snapshot; these are NOT Flink's real interfaces.
interface StateHandle {}
interface StateView { StateHandle writeToCheckpointStorage() throws Exception; }

class SnapshotExample {
    static FutureTask<StateHandle> snapshot(final StateView capturedView) {
        // Synchronous part has already happened: 'capturedView' is an
        // immutable capture of the operator state.
        FutureTask<StateHandle> task = new FutureTask<>(new Callable<StateHandle>() {
            @Override
            public StateHandle call() throws Exception {
                // Asynchronous part: executed later by a background
                // checkpointing thread; cancellable via task.cancel(true).
                return capturedView.writeToCheckpointStorage();
            }
        });
        // A purely synchronous operator would call task.run() here and
        // return an already completed FutureTask instead.
        return task;
    }
}
```

http://git-wip-us.apache.org/repos/asf/flink/blob/ac193d6a/docs/internals/stream_checkpointing.md
--
diff --git a/docs/internals/stream_checkpointing.md 
b/docs/internals/stream_checkpointing.md
index 75493ca..e8b3e46 100644
--- a/docs/internals/stream_checkpointing.md
+++ b/docs/internals/stream_checkpointing.md
@@ -138,7 +138,7 @@ in *at least once* mode.
 
 Note that the above described mechanism implies that operators stop processing 
input records while they are storing a snapshot of their state in the *state 
backend*. This *synchronous* state snapshot introduces a delay every time a 
snapshot is taken.
 
-It is possible to let an operator continue processing while it stores its 
state snapshot, effectively letting the state snapshots happen *asynchronously* 
in the background. To do that, the operator must be able to produce a state 
object that should be stored in a way such that further modifications to the 
operator state do not affect that state object.
+It is possible to let an operator continue processing while it stores its 
state snapshot, effectively letting the state snapshots happen *asynchronously* 
in the background. To do that, the operator must be able to produce a state 
object that should be stored in a way such that further modifications to the 
operator state do not affect that state object. Examples of this are 
*copy-on-write* style data structures, such as those used in RocksDB.
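
As a small illustration of the copy-on-write idea (independent of RocksDB's actual implementation), a snapshot can simply keep a reference to the current immutable version of the data, while modifications publish a new version:

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class CopyOnWriteState {

    // The current version; never mutated in place after being published.
    private volatile List<String> current = Collections.emptyList();

    // The snapshot is just a reference to the current immutable version;
    // later modifications cannot affect it.
    public List<String> snapshot() {
        return current;
    }

    // Modifications copy the current version and publish the copy.
    public synchronized void add(String value) {
        List<String> next = new ArrayList<>(current);
        next.add(value);
        current = Collections.unmodifiableList(next);
    }
}
```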
 
 After receiving the checkpoint barriers on its inputs, the operator starts the 
asynchronous snapshot copying of its state. It immediately emits the barrier to 
its outputs and continues with the regular stream processing. Once the 
background copy process has completed, it acknowledges the checkpoint to the 
checkpoint coordinator (the JobManager). The checkpoint is now only complete 
after all sinks received the barriers and all stateful operators acknowledged 
their completed backup (which may be later than the barriers reaching the 
sinks).
 
@@ -152,3 +152,15 @@ entire distributed dataflow, and gives each operator the 
state that was snapshot
 stream from position Sk. For example in Apache Kafka, that 
means telling the consumer to start fetching from offset Sk.
 
 If state was snapshotted incrementally, the operators start with the state of 
the latest full snapshot and then apply a series of incremental snapshot 
updates to that state.
+
+## Operator Snapshot Implementation
+
+When operator snapshots are taken, there are two parts: the **synchronous** 
and the **asynchronous** parts.

[02/10] flink git commit: [FLINK-5454] [docs] Add stub for docs on "Tuning for large state"

2017-01-16 Thread sewen
[FLINK-5454] [docs] Add stub for docs on "Tuning for large state"


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/7aad7514
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/7aad7514
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/7aad7514

Branch: refs/heads/release-1.2
Commit: 7aad7514ab8c9c371d02b8e4641c64e4d460d78d
Parents: daad28a
Author: Stephan Ewen 
Authored: Mon Jan 9 20:01:38 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:52:50 2017 +0100

--
 docs/monitoring/README.md | 21 ++
 docs/monitoring/large_state_tuning.md | 62 ++
 docs/monitoring/rest_api.md   |  2 +-
 3 files changed, 84 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/7aad7514/docs/monitoring/README.md
--
diff --git a/docs/monitoring/README.md b/docs/monitoring/README.md
new file mode 100644
index 000..88c6509
--- /dev/null
+++ b/docs/monitoring/README.md
@@ -0,0 +1,21 @@
+
+
+This folder contains the documentation in the category
+**Debugging & Monitoring**.

http://git-wip-us.apache.org/repos/asf/flink/blob/7aad7514/docs/monitoring/large_state_tuning.md
--
diff --git a/docs/monitoring/large_state_tuning.md 
b/docs/monitoring/large_state_tuning.md
new file mode 100644
index 000..c49c106
--- /dev/null
+++ b/docs/monitoring/large_state_tuning.md
@@ -0,0 +1,62 @@
+---
+title: "Debugging and Tuning Checkpoints and Large State"
+nav-parent_id: monitoring
+nav-pos: 5
+---
+
+
+This page gives a guide on how to improve and tune applications that use large 
state.
+
+* ToC
+{:toc}
+
+## Monitoring State and Checkpoints
+
+  - Checkpoint statistics overview
+  - Interpret time until checkpoints
+  - Synchronous vs. asynchronous checkpoint time
+
+## Tuning Checkpointing
+
+  - Checkpoint interval
+  - Getting work done between checkpoints (min time between checkpoints); see the sketch below
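
A sketch of how these two knobs are set through the DataStream API; the values are placeholders:

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

// Checkpoint interval: trigger a checkpoint every 10 seconds.
env.enableCheckpointing(10000);

// Minimum time between checkpoints: at least 5 seconds of regular processing
// between the end of one checkpoint and the start of the next.
env.getCheckpointConfig().setMinPauseBetweenCheckpoints(5000);
```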
+
+## Tuning Network Buffers
+
+  - getting a good number of buffers to use
+  - monitoring if too many buffers cause too much inflight data
+
+## Make checkpointing asynchronous where possible
+
+  - large state should be kept in keyed state, not operator state, because keyed 
state is managed while operator state is not (subject to change in future versions)
+
+  - asynchronous snapshots are preferable; long synchronous snapshot times can 
cause problems with large state and complex topologies. Move to RocksDB for that
+
+## Tuning RocksDB
+
+  - Predefined options
+  - Custom Options (a sketch of the predefined variant follows below)
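
For illustration, a sketch of selecting predefined options on the RocksDB state backend; the checkpoint URI is a placeholder:

```java
import java.io.IOException;

import org.apache.flink.contrib.streaming.state.PredefinedOptions;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RocksDbTuningExample {
    public static void main(String[] args) throws IOException {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The checkpoint URI is a placeholder; point it at a real filesystem.
        RocksDBStateBackend backend = new RocksDBStateBackend("hdfs:///flink/checkpoints");

        // Predefined options: a profile tuned for a class of hardware,
        // here spinning disks.
        backend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED);

        env.setStateBackend(backend);
    }
}
```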
+
+## Capacity planning
+
+  - Normal operation should not be constantly back pressured (link to back 
pressure monitor)
+  - Allow for some excess capacity to support catch-up in case of failures and 
checkpoint alignment skew (due to data skew or bad nodes)
+
+

http://git-wip-us.apache.org/repos/asf/flink/blob/7aad7514/docs/monitoring/rest_api.md
--
diff --git a/docs/monitoring/rest_api.md b/docs/monitoring/rest_api.md
index 2da3726..d49dece 100644
--- a/docs/monitoring/rest_api.md
+++ b/docs/monitoring/rest_api.md
@@ -1,7 +1,7 @@
 ---
 title:  "Monitoring REST API"
 nav-parent_id: monitoring
-nav-pos: 3
+nav-pos: 10
 ---
 

[03/10] flink git commit: [FLINK-5455] [docs] Add stub for Upgrading Jobs and Framework

2017-01-16 Thread sewen
[FLINK-5455] [docs] Add stub for Upgrading Jobs and Framework


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/a562e3d9
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/a562e3d9
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/a562e3d9

Branch: refs/heads/release-1.2
Commit: a562e3d973cb069f5060ee96af2491e745efcac0
Parents: 7aad751
Author: Stephan Ewen 
Authored: Mon Jan 9 20:48:35 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:53:04 2017 +0100

--
 docs/ops/upgrading.md | 57 ++
 1 file changed, 57 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/a562e3d9/docs/ops/upgrading.md
--
diff --git a/docs/ops/upgrading.md b/docs/ops/upgrading.md
new file mode 100644
index 000..d94e884
--- /dev/null
+++ b/docs/ops/upgrading.md
@@ -0,0 +1,57 @@
+---
+title: "Upgrading Jobs and Flink Versions"
+nav-parent_id: setup
+nav-pos: 15
+---
+
+
+* ToC
+{:toc}
+
+## Upgrading Flink Streaming Applications
+
+  - Savepoint, stop/cancel, start from savepoint
+  - Atomic Savepoint and Stop (link to JIRA issue)
+
+  - Limitations: Breaking chaining behavior (link to Savepoint section)
+  - Encourage using `uid(...)` explicitly for every operator (see the sketch below)
+
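A brief sketch of the `uid(...)` recommendation above; `events` and `MyMapFunction` are assumed to be defined elsewhere:

```java
// 'events' is an assumed DataStream<String>; MyMapFunction is a placeholder.
DataStream<String> mapped = events
        .map(new MyMapFunction())
        // A stable, explicit operator ID lets savepoint state be matched to
        // this operator even if chaining or the surrounding topology changes.
        .uid("event-mapper");
```
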
+## Upgrading the Flink Framework Version
+
+  - Either "in place" : Savepoint -> stop/cancel -> shutdown cluster -> start 
new version -> start job 
+  - Another cluster variant : Savepoint -> resume in other cluster -> "flip 
switch" -> shutdown old cluster
+
+## Compatibility Table
+
+Savepoints are compatible across Flink versions as indicated by the table 
below:
+ 
+| Created with \ Resumed With | 1.1.x | 1.2.x |
+| ---:|:-:|:-:|
+| 1.1.x   |   X   |   X   |
+| 1.2.x   |   |   X   |
+
+
+
+## Special Considerations for Upgrades from Flink 1.1.x to Flink 1.2.x
+
+  - The parallelism of the Savepoint in Flink 1.1.x becomes the maximum 
parallelism in Flink 1.2.x.
+  - Increasing the parallelism for upgraded jobs is not possible out of the 
box.
+
+



flink git commit: [FLINK-5457] [docs] Add documentation for asynchronous I/O

2017-01-16 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master e2ba042c1 -> fb3761b57


[FLINK-5457] [docs] Add documentation for asynchronous I/O


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/fb3761b5
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/fb3761b5
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/fb3761b5

Branch: refs/heads/master
Commit: fb3761b578d6a7d956ca26a8be2ee88b3fbf8c46
Parents: e2ba042
Author: Stephan Ewen 
Authored: Sun Jan 15 19:41:36 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 11:47:57 2017 +0100

--
 .gitignore |   1 +
 docs/dev/stream/asyncio.md | 226 ++-
 docs/fig/async_io.svg  | 337 
 3 files changed, 562 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/fb3761b5/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 1b9c64e..9012d0a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,5 +29,6 @@ out/
 /docs/.rubydeps
 /docs/ruby2/.bundle
 /docs/ruby2/.rubydeps
+/docs/.jekyll-metadata
 *.ipr
 *.iws

http://git-wip-us.apache.org/repos/asf/flink/blob/fb3761b5/docs/dev/stream/asyncio.md
--
diff --git a/docs/dev/stream/asyncio.md b/docs/dev/stream/asyncio.md
index 2d0867a..abc0b24 100644
--- a/docs/dev/stream/asyncio.md
+++ b/docs/dev/stream/asyncio.md
@@ -1,5 +1,6 @@
 ---
-title: "Async I/O for External Data Access"
+title: "Asynchronous I/O for External Data Access"
+nav-title: "Async I/O"
 nav-parent_id: streaming
 nav-pos: 60
 ---
@@ -25,4 +26,225 @@ under the License.
 * ToC
 {:toc}
 
-**TDB**
+This page explains the use of Flink's API for asynchronous I/O with external 
data stores.
+For users not familiar with asynchronous or event-driven programming, an 
article about Futures and
+event-driven programming may be useful preparation.
+
+Note: Details about the design and implementation of the asynchronous I/O 
utility can be found in the proposal and design document
+[FLIP-12: Asynchronous I/O Design and 
Implementation](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=65870673).
+
+
+## The need for Asynchronous I/O Operations
+
+When interacting with external systems (for example when enriching stream 
events with data stored in a database), one needs to take care
+that communication delay with the external system does not dominate the 
streaming application's total work.
+
+Naively accessing data in the external database, for example in a 
`MapFunction`, typically means **synchronous** interaction:
+A request is sent to the database and the `MapFunction` waits until the 
response has been received. In many cases, this waiting
+makes up the vast majority of the function's time.
+
+Asynchronous interaction with the database means that a single parallel 
function instance can handle many requests concurrently and
+receive the responses concurrently. That way, the waiting time can be 
overlayed with sending other requests and
+receiving responses. At the very least, the waiting time is amortized over 
multiple requests. This leads in most cased to much higher
+streaming throughput.
+
+
+
+*Note:* Improving throughput by just scaling the `MapFunction` to a very high 
parallelism is in some cases possible as well, but usually
+comes at a very high resource cost: Having many more parallel MapFunction 
instances means more tasks, threads, Flink-internal network
+connections, network connections to the database, buffers, and general 
internal bookkeeping overhead.
+
+
+## Prerequisites
+
+As illustrated in the section above, implementing proper asynchronous I/O to a 
database (or key/value store) requires a client
+to that database that supports asynchronous requests. Many popular databases 
offer such a client.
+
+In the absence of such a client, one can try and turn a synchronous client 
into a limited concurrent client by creating
+multiple clients and handling the synchronous calls with a thread pool. 
However, this approach is usually less
+efficient than a proper asynchronous client.
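
A rough sketch of that workaround: a hypothetical blocking `SyncClient` wrapped in a fixed-size thread pool, so that a bounded number of synchronous calls can be in flight at once:

```java
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Wraps a hypothetical blocking client so that up to 10 synchronous
// requests can run concurrently on pool threads.
class PooledSyncClient {

    private final ExecutorService pool = Executors.newFixedThreadPool(10);
    private final SyncClient client = new SyncClient(); // hypothetical blocking client

    public Future<String> queryAsync(final String key) {
        return pool.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                return client.query(key); // blocking call, off the caller's thread
            }
        });
    }
}
```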
+
+
+## Async I/O API
+
+Flink's Async I/O API allows users to use asynchronous request clients with 
data streams. The API handles the integration with
+data streams, as well as handling order, event time, fault tolerance, etc.
+
+Assuming one has an asynchronous client for the target database, three parts 
are needed to implement a stream transformation
+with asynchronous I/O against the database:
+
+  - An implementation of `AsyncFunction` that dispatches the requests
+  - A *callback* that takes the result of the operation and hands it to the 
`AsyncCollector`
+  - Applying the async I/O operation on a DataStream as a transformation
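
To make the three parts concrete, a minimal sketch along the lines of this API. `DatabaseClient` and its `query(...)` method are hypothetical stand-ins for a real asynchronous client; the Flink class and method names follow the async I/O API described on this page:

```java
import java.util.Collections;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.functions.async.AsyncFunction;
import org.apache.flink.streaming.api.functions.async.collector.AsyncCollector;

// Part 1: an AsyncFunction implementation that dispatches the requests.
class AsyncDatabaseRequest implements AsyncFunction<String, String> {

    private transient DatabaseClient client; // hypothetical async client

    @Override
    public void asyncInvoke(String key, final AsyncCollector<String> collector) {
        CompletableFuture<String> result = client.query(key);

        // Part 2: the callback takes the result and hands it to the collector.
        result.thenAccept(value -> collector.collect(Collections.singleton(value)));
    }
}

// Part 3: apply the async I/O operation on a DataStream as a transformation,
// here with a 1000 ms timeout and at most 100 in-flight requests
// ('input' is an assumed DataStream<String>).
DataStream<String> enriched = AsyncDataStream.unorderedWait(
        input, new AsyncDatabaseRequest(), 1000, TimeUnit.MILLISECONDS, 100);
```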

[2/9] flink git commit: [FLINK-5448] [checkpoints] Fix typo in StateAssignmentOperation Exception

2017-01-16 Thread sewen
[FLINK-5448] [checkpoints] Fix typo in StateAssignmentOperation Exception

This closes #3097


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/fa67ef40
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/fa67ef40
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/fa67ef40

Branch: refs/heads/master
Commit: fa67ef409c9d0d152d22c74e3ace4d56bc8aa7da
Parents: 475c0b1
Author: mtunique 
Authored: Thu Jan 12 11:55:57 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Jan 16 20:18:47 2017 +0100

--
 .../flink/runtime/checkpoint/StateAssignmentOperation.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/fa67ef40/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
index f11f69b..6c23f02 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
@@ -109,8 +109,8 @@ public class StateAssignmentOperation {
if (hasNonPartitionedState && parallelismChanged) {
throw new IllegalStateException("Cannot restore 
the latest checkpoint because " +
"the operator " + 
executionJobVertex.getJobVertexId() + " has non-partitioned " +
-   "state and its parallelism 
changed. The operator" + executionJobVertex.getJobVertexId() +
-   " has parallelism " + 
newParallelism + " whereas the corresponding" +
+   "state and its parallelism 
changed. The operator " + executionJobVertex.getJobVertexId() +
+   " has parallelism " + 
newParallelism + " whereas the corresponding " +
"state object has a parallelism 
of " + oldParallelism);
}
 



[5/9] flink git commit: [FLINK-3617] [scala apis] Added null value check.

2017-01-16 Thread sewen
[FLINK-3617] [scala apis] Added null value check.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/fdce1f31
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/fdce1f31
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/fdce1f31

Branch: refs/heads/master
Commit: fdce1f319c512fc845b64cbb7cbfb10f9d899021
Parents: c4626cb
Author: Aleksandr Chermenin 
Authored: Fri Dec 16 14:42:50 2016 +0300
Committer: Stephan Ewen 
Committed: Mon Jan 16 20:18:48 2017 +0100

--
 .../flink/api/scala/typeutils/CaseClassSerializer.scala  | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/fdce1f31/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
--
diff --git 
a/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
 
b/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
index 625ee80..29b4952 100644
--- 
a/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
+++ 
b/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
@@ -20,7 +20,8 @@ package org.apache.flink.api.scala.typeutils
 import org.apache.flink.annotation.Internal
 import org.apache.flink.api.common.typeutils.TypeSerializer
 import org.apache.flink.api.java.typeutils.runtime.TupleSerializerBase
-import org.apache.flink.core.memory.{DataOutputView, DataInputView}
+import org.apache.flink.core.memory.{DataInputView, DataOutputView}
+import org.apache.flink.types.NullFieldException
 
 /**
  * Serializer for Case Classes. Creation and access is different from
@@ -97,7 +98,13 @@ abstract class CaseClassSerializer[T <: Product](
 var i = 0
 while (i < arity) {
   val serializer = fieldSerializers(i).asInstanceOf[TypeSerializer[Any]]
-  serializer.serialize(value.productElement(i), target)
+  val o = value.productElement(i)
+  try
+serializer.serialize(o, target)
+  catch {
+case e: NullPointerException =>
+  throw new NullFieldException(i, e)
+  }
   i += 1
 }
   }



[6/9] flink git commit: [FLINK-5345] [core] Migrate various cleanup calls to concurrency-safe directory deletion

2017-01-16 Thread sewen
[FLINK-5345] [core] Migrate various cleanup calls to concurrency-safe directory 
deletion


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/c4626cba
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/c4626cba
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/c4626cba

Branch: refs/heads/master
Commit: c4626cbae074ba288e54308c40f93258e14c9667
Parents: 8742ff1
Author: Stephan Ewen 
Authored: Thu Jan 12 10:49:13 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 20:18:48 2017 +0100

--
 .../contrib/streaming/state/RocksDBKeyedStateBackend.java  | 4 +++-
 .../org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java | 6 +-
 .../src/main/java/org/apache/flink/runtime/blob/BlobCache.java | 3 ++-
 .../main/java/org/apache/flink/runtime/blob/BlobServer.java| 3 ++-
 .../org/apache/flink/runtime/io/disk/iomanager/IOManager.java  | 2 +-
 5 files changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/c4626cba/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
--
diff --git 
a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
 
b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
index 71e2c79..b207af6 100644
--- 
a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
+++ 
b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
@@ -17,7 +17,6 @@
 
 package org.apache.flink.contrib.streaming.state;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.flink.api.common.JobID;
 import org.apache.flink.api.common.state.FoldingState;
 import org.apache.flink.api.common.state.FoldingStateDescriptor;
@@ -54,8 +53,10 @@ import 
org.apache.flink.runtime.state.KeyedBackendSerializationProxy;
 import org.apache.flink.runtime.state.RegisteredBackendStateMetaInfo;
 import org.apache.flink.runtime.state.StreamStateHandle;
 import org.apache.flink.runtime.util.SerializableObject;
+import org.apache.flink.util.FileUtils;
 import org.apache.flink.util.InstantiationUtil;
 import org.apache.flink.util.Preconditions;
+
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.ColumnFamilyOptions;
@@ -65,6 +66,7 @@ import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import org.rocksdb.RocksIterator;
 import org.rocksdb.Snapshot;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/c4626cba/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
--
diff --git 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
index 3080b57..92c2e36 100644
--- 
a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
+++ 
b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
@@ -19,6 +19,7 @@
 package org.apache.flink.runtime.webmonitor;
 
 import akka.actor.ActorSystem;
+
 import io.netty.bootstrap.ServerBootstrap;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelFuture;
@@ -31,7 +32,7 @@ import io.netty.handler.codec.http.router.Handler;
 import io.netty.handler.codec.http.router.Router;
 import io.netty.handler.ssl.SslHandler;
 import io.netty.handler.stream.ChunkedWriteHandler;
-import org.apache.commons.io.FileUtils;
+
 import org.apache.flink.api.common.time.Time;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
@@ -81,8 +82,11 @@ import 
org.apache.flink.runtime.webmonitor.metrics.JobMetricsHandler;
 import org.apache.flink.runtime.webmonitor.metrics.JobVertexMetricsHandler;
 import org.apache.flink.runtime.webmonitor.metrics.MetricFetcher;
 import org.apache.flink.runtime.webmonitor.metrics.TaskManagerMetricsHandler;
+import org.apache.flink.util.FileUtils;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import scala.concurrent.ExecutionContext$;
 import scala.concurrent.ExecutionContextExecutor;
 import scala.concurrent.Promise;

http://git-wip-us.apache.org/repos/asf/flink/blob/c4626cba/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCache.java

[4/9] flink git commit: [FLINK-4450] [storm compat] Update storm version to 1.0

2017-01-16 Thread sewen
[FLINK-4450] [storm compat] Update storm version to 1.0

This closes #3037


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/475c0b1a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/475c0b1a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/475c0b1a

Branch: refs/heads/master
Commit: 475c0b1a6c74744e3431b268bc1a2ee764052cf1
Parents: ef8cdfe
Author: yuzhongliu 
Authored: Thu Dec 22 11:36:37 2016 +0800
Committer: Stephan Ewen 
Committed: Mon Jan 16 20:18:47 2017 +0100

--
 flink-contrib/flink-storm-examples/pom.xml  |  88 ---
 .../storm/exclamation/ExclamationLocal.java |  10 +-
 .../storm/exclamation/ExclamationTopology.java  |   4 +-
 .../storm/exclamation/ExclamationWithBolt.java  |   4 +-
 .../storm/exclamation/ExclamationWithSpout.java |   4 +-
 .../exclamation/operators/ExclamationBolt.java  |  14 +-
 .../flink/storm/join/SingleJoinExample.java |  14 +-
 .../flink/storm/print/PrintSampleStream.java|  10 +-
 .../storm/split/operators/RandomSpout.java  |  12 +-
 .../split/operators/VerifyAndEnrichBolt.java|  14 +-
 .../flink/storm/util/AbstractBoltSink.java  |  10 +-
 .../flink/storm/util/AbstractLineSpout.java |  10 +-
 .../apache/flink/storm/util/BoltFileSink.java   |   2 +-
 .../apache/flink/storm/util/BoltPrintSink.java  |   2 +-
 .../org/apache/flink/storm/util/FileSpout.java  |   6 +-
 .../flink/storm/util/FiniteFileSpout.java   |   6 +-
 .../apache/flink/storm/util/InMemorySpout.java  |   2 +-
 .../flink/storm/util/OutputFormatter.java   |   2 +-
 .../flink/storm/util/SimpleOutputFormatter.java |   2 +-
 .../flink/storm/util/TupleOutputFormatter.java  |   2 +-
 .../storm/wordcount/BoltTokenizerWordCount.java |   2 +-
 .../wordcount/BoltTokenizerWordCountPojo.java   |   2 +-
 .../BoltTokenizerWordCountWithNames.java|   4 +-
 .../storm/wordcount/SpoutSourceWordCount.java   |   4 +-
 .../flink/storm/wordcount/WordCountLocal.java   |   8 +-
 .../storm/wordcount/WordCountLocalByName.java   |   8 +-
 .../wordcount/WordCountRemoteByClient.java  |  16 +-
 .../wordcount/WordCountRemoteBySubmitter.java   |   8 +-
 .../storm/wordcount/WordCountTopology.java  |   6 +-
 .../storm/wordcount/operators/BoltCounter.java  |  14 +-
 .../wordcount/operators/BoltCounterByName.java  |  14 +-
 .../wordcount/operators/BoltTokenizer.java  |  14 +-
 .../operators/BoltTokenizerByName.java  |  14 +-
 .../wordcount/operators/WordCountFileSpout.java |   4 +-
 .../operators/WordCountInMemorySpout.java   |   4 +-
 .../org/apache/flink/storm/split/SplitBolt.java |  14 +-
 .../flink/storm/split/SplitBoltTopology.java|   2 +-
 .../flink/storm/split/SplitSpoutTopology.java   |   2 +-
 .../flink/storm/split/SplitStreamBoltLocal.java |   4 +-
 .../storm/split/SplitStreamSpoutLocal.java  |   4 +-
 .../storm/tests/StormFieldsGroupingITCase.java  |   6 +-
 .../flink/storm/tests/StormMetaDataITCase.java  |   4 +-
 .../flink/storm/tests/StormUnionITCase.java |   4 +-
 .../tests/operators/FiniteRandomSpout.java  |  14 +-
 .../flink/storm/tests/operators/MergerBolt.java |  12 +-
 .../storm/tests/operators/MetaDataSpout.java|  12 +-
 .../flink/storm/tests/operators/TaskIdBolt.java |  14 +-
 .../tests/operators/VerifyMetaDataBolt.java |  16 +-
 flink-contrib/flink-storm/pom.xml   |  59 +++--
 .../org/apache/flink/storm/api/FlinkClient.java |  20 +-
 .../flink/storm/api/FlinkLocalCluster.java  |  14 +-
 .../storm/api/FlinkOutputFieldsDeclarer.java|   8 +-
 .../apache/flink/storm/api/FlinkSubmitter.java  |  12 +-
 .../apache/flink/storm/api/FlinkTopology.java   |  18 +-
 .../flink/storm/api/StormFlinkStreamMerger.java |   6 +-
 .../flink/storm/api/TwoFlinkStreamsMerger.java  |   6 +-
 .../apache/flink/storm/util/FiniteSpout.java|   2 +-
 .../flink/storm/util/NullTerminatingSpout.java  |   8 +-
 .../util/SpoutOutputCollectorObserver.java  |   4 +-
 .../apache/flink/storm/util/StormConfig.java| 244 +--
 .../flink/storm/wrappers/BoltCollector.java |   8 +-
 .../flink/storm/wrappers/BoltWrapper.java   |  18 +-
 .../storm/wrappers/FlinkTopologyContext.java|  20 +-
 .../storm/wrappers/MergedInputsBoltWrapper.java |   2 +-
 .../wrappers/SetupOutputFieldsDeclarer.java |   6 +-
 .../flink/storm/wrappers/SpoutCollector.java|   7 +-
 .../flink/storm/wrappers/SpoutWrapper.java  |   8 +-
 .../apache/flink/storm/wrappers/StormTuple.java |  20 +-
 .../storm/wrappers/WrapperSetupHelper.java  |  24 +-
 .../api/FlinkOutputFieldsDeclarerTest.java  |   4 +-
 .../flink/storm/api/FlinkTopologyTest.java  |   4 +-
 .../org/apache/flink/storm/api/TestBolt.java|  10 +-
 .../org/apache/flink/storm/api/TestSpout.java   |   8 +-
 .../flink/storm/util/FiniteTestSpout.java   |  12 +-
 

[8/9] flink git commit: [FLINK-5345] [core] Add a utility to delete directories without failing in the presence of concurrent deletes

2017-01-16 Thread sewen
[FLINK-5345] [core] Add a utility to delete directories without failing in the 
presence of concurrent deletes


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8742ff1b
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8742ff1b
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8742ff1b

Branch: refs/heads/master
Commit: 8742ff1ba4c345e9aa8fd0adc207930cdef959a6
Parents: faee74e
Author: Stephan Ewen 
Authored: Wed Jan 11 21:05:57 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 20:18:48 2017 +0100

--
 .../java/org/apache/flink/util/FileUtils.java   | 171 +--
 .../org/apache/flink/util/FileUtilsTest.java| 162 ++
 2 files changed, 319 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/8742ff1b/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
--
diff --git a/flink-core/src/main/java/org/apache/flink/util/FileUtils.java 
b/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
index 23f5eb9..0d527d5 100644
--- a/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
+++ b/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
@@ -23,27 +23,28 @@ import org.apache.flink.core.fs.FileSystem;
 import org.apache.flink.core.fs.Path;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.nio.file.StandardOpenOption;
 
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
 /**
- * This is a utility class to deal with temporary files.
+ * This is a utility class to deal with files and directories. Contains utilities 
for recursive
+ * deletion and creation of temporary files.
  */
 public final class FileUtils {
 
-   /**
-* The alphabet to construct the random part of the filename from.
-*/
-   private static final char[] ALPHABET = { '0', '1', '2', '3', '4', '5', 
'6', '7', '8', '9', '0', 'a', 'b', 'c', 'd',
-   'e', 'f' };
+   /** The alphabet to construct the random part of the filename from. */
+   private static final char[] ALPHABET = 
+   { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 
'0', 'a', 'b', 'c', 'd', 'e', 'f' };
 
-   /**
-* The length of the random part of the filename.
-*/
-   private static final int LENGTH = 12;
+   /** The length of the random part of the filename. */
+   private static final int RANDOM_FILE_NAME_LENGTH = 12;
 
-   
+   // 

 
/**
 * Constructs a random filename with the given prefix and
@@ -54,10 +55,9 @@ public final class FileUtils {
 * @return the generated random filename with the given prefix
 */
public static String getRandomFilename(final String prefix) {
-
final StringBuilder stringBuilder = new StringBuilder(prefix);
 
-   for (int i = 0; i < LENGTH; i++) {
+   for (int i = 0; i < RANDOM_FILE_NAME_LENGTH; i++) {
stringBuilder.append(ALPHABET[(int) 
Math.floor(Math.random() * (double) ALPHABET.length)]);
}
 
@@ -87,7 +87,150 @@ public final class FileUtils {
}
 
// 

-   //  Deleting directories
+   //  Deleting directories on standard File Systems
+   // 

+
+   /**
+* Removes the given file or directory recursively.
+* 
+* If the file or directory does not exist, this does not throw an 
exception, but simply does nothing.
+* It considers the fact that a file-to-be-deleted is not present a 
success.
+* 
+* This method is safe against other concurrent deletion attempts.
+* 
+* @param file The file or directory to delete.
+* 
+* @throws IOException Thrown if the directory could not be cleaned for 
some reason, for example
+* due to missing access/write permissions.
+*/
+   public static void deleteFileOrDirectory(File file) throws IOException {
+   checkNotNull(file, "file");
+
+   if (file.isDirectory()) {
+   // file exists and is directory
+   deleteDirectory(file);
+   }
+   else if (file.exists()) {
+   try {
+   Files.delete(file.toPath());
+   }
+   
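
A hedged usage sketch of the utility above: two threads racing to delete the same directory, which per the documented contract should both complete without exceptions:

```java
import java.io.File;
import java.io.IOException;

import org.apache.flink.util.FileUtils;

public class ConcurrentDeleteExample {

    public static void main(String[] args) throws Exception {
        final File dir = new File(System.getProperty("java.io.tmpdir"), "to-delete");
        dir.mkdirs();

        Runnable deleter = new Runnable() {
            @Override
            public void run() {
                try {
                    // Deleting a missing file counts as success, so racing
                    // deleters do not fail each other.
                    FileUtils.deleteFileOrDirectory(dir);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        };

        Thread t1 = new Thread(deleter);
        Thread t2 = new Thread(deleter);
        t1.start(); t2.start();
        t1.join(); t2.join();
    }
}
```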

[9/9] flink git commit: [FLINK-4959] [docs] Add documentation for ProcessFunction

2017-01-16 Thread sewen
[FLINK-4959] [docs] Add documentation for ProcessFunction


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/7a339a65
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/7a339a65
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/7a339a65

Branch: refs/heads/master
Commit: 7a339a65f13bfccec1f374e035d557290b45bd01
Parents: fdce1f3
Author: Stephan Ewen 
Authored: Mon Jan 16 20:17:13 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 21:10:20 2017 +0100

--
 docs/concepts/programming-model.md  |  60 ++--
 docs/concepts/runtime.md|  10 +-
 docs/dev/stream/process_function.md | 230 +++
 docs/fig/levels_of_abstraction.svg  | 193 ++
 4 files changed, 474 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/7a339a65/docs/concepts/programming-model.md
--
diff --git a/docs/concepts/programming-model.md 
b/docs/concepts/programming-model.md
index 5ab6b8f..3d2aebb 100644
--- a/docs/concepts/programming-model.md
+++ b/docs/concepts/programming-model.md
@@ -27,11 +27,47 @@ under the License.
 * This will be replaced by the TOC
 {:toc}
 
+## Levels of Abstraction
+
+Flink offers different levels of abstraction to develop streaming/batch 
applications.
+
+
+
+  - The lowest level abstraction simply offers **stateful streaming**. It is 
embedded into the [DataStream API](../dev/datastream_api.html)
+via the [Process Function](../dev/stream/process_function.html). It allows 
users to freely process events from one or more streams,
+and use consistent, fault-tolerant *state*. In addition, users can register 
event time and processing time callbacks,
+allowing programs to realize sophisticated computations (a small sketch follows 
after this list).
+
+  - In practice, most applications would not need the above described low 
level abstraction, but would instead program against the
+**Core APIs** like the [DataStream API](../dev/datastream_api.html) 
(bounded/unbounded streams) and the [DataSet API](../dev/batch/index.html)
+(bounded data sets). These fluent APIs offer the common building blocks 
for data processing, like various forms of user-specified
+transformations, joins, aggregations, windows, state, etc. Data types 
processed in these APIs are represented as classes
+in the respective programming languages.
+
+The low level *Process Function* integrates with the *DataStream API*, 
making it possible to go to the lower level abstraction 
+for certain operations only. The *DataSet API* offers additional 
primitives on bounded data sets, like loops/iterations.
+
+  - The **Table API** is a declarative DSL centered around *tables*, which may 
be dynamically changing tables (when representing streams).
+The Table API follows the (extended) relational model: Tables have a 
schema attached (similar to tables in relational databases)
+and the API offers comparable operations, such as select, project, join, 
group-by, aggregate, etc.
+Table API programs declaratively define *what logical operation should be 
done* rather than specifying exactly
+   *how the code for the operation looks*. Though the Table API is extensible 
by various types of user-defined
+functions, it is less expressive than the *Core APIs*, but more concise to 
use (less code to write).
+In addition, Table API programs also go through an optimizer that applies 
optimization rules before execution.
+
+One can seamlessly convert between tables and *DataStream*/*DataSet*, 
allowing programs to mix the *Table API* with the *DataStream*
+and *DataSet* APIs.
+
+  - The highest level abstraction offered by Flink is **SQL**. This 
abstraction is similar to the *Table API* both in semantics and
+expressiveness, but represents programs as SQL query expressions.
+The SQL abstraction closely interacts with the Table API, and SQL queries 
can be executed over tables defined in the *Table API*.
+
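As a flavor of the lowest abstraction level, a minimal sketch in the spirit of the ProcessFunction docs linked above (base class and exact signatures per the 1.2-era API; adjust for other versions):

```java
import org.apache.flink.streaming.api.functions.RichProcessFunction;
import org.apache.flink.util.Collector;

public class TimestampEcho extends RichProcessFunction<String, String> {

    @Override
    public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
        // Emit the element and register an event-time callback one minute
        // after the element's timestamp.
        out.collect(value);
        ctx.timerService().registerEventTimeTimer(ctx.timestamp() + 60000);
    }

    @Override
    public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out) throws Exception {
        // The event-time callback registered above fires here.
        out.collect("timer fired at " + timestamp);
    }
}
```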
+
 ## Programs and Dataflows
 
 The basic building blocks of Flink programs are **streams** and 
**transformations**. (Note that the
-DataSets used in Flink's batch API are also streams internally -- more about 
that
-later.) Conceptually a *stream* is a never-ending flow of data records, and a 
*transformation* is an
+DataSets used in Flink's DataSet API are also streams internally -- more about 
that
+later.) Conceptually a *stream* is a (potentially never-ending) flow of data 
records, and a *transformation* is an
 operation that takes one or more streams as input, and produces one or more 
output streams as a
 result.
 
@@ -40,7 +76,7 @@ Each dataflow starts with one or more **sources** and ends in 
one or more **sink
 arbitrary 

[1/9] flink git commit: [FLINK-5438] [streaming api] Typo in JobGraph generator Exception

2017-01-16 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master ef8cdfe59 -> 7a339a65f


[FLINK-5438] [streaming api] Typo in JobGraph generator Exception

This closes #3098


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/c2f28c01
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/c2f28c01
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/c2f28c01

Branch: refs/heads/master
Commit: c2f28c013116328583043ca1433c45c85e32de30
Parents: fa67ef4
Author: mtunique 
Authored: Thu Jan 12 12:02:49 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Jan 16 20:18:47 2017 +0100

--
 .../flink/streaming/api/graph/StreamingJobGraphGenerator.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/c2f28c01/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
index 0cb7d9a..1bfaf3f 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
@@ -317,7 +317,7 @@ public class StreamingJobGraphGenerator {
// the parallelism should always be smaller or equal 
than the max parallelism
throw new IllegalStateException("The maximum 
parallelism (" + maxParallelism + ") of " +
"the stream node " + streamNode + " is smaller 
than the parallelism (" +
-   parallelism + "). Increase the maximum 
parallelism or decrease the parallelism of" +
+   parallelism + "). Increase the maximum 
parallelism or decrease the parallelism of " +
"this operator.");
} else {

jobVertex.setMaxParallelism(streamNode.getMaxParallelism());



[3/9] flink git commit: [FLINK-4450] [storm compat] Update storm version to 1.0

2017-01-16 Thread sewen
http://git-wip-us.apache.org/repos/asf/flink/blob/475c0b1a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/FiniteSpout.java
--
diff --git 
a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/FiniteSpout.java
 
b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/FiniteSpout.java
index 99c2583..10f9797 100644
--- 
a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/FiniteSpout.java
+++ 
b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/FiniteSpout.java
@@ -17,7 +17,7 @@
 
 package org.apache.flink.storm.util;
 
-import backtype.storm.topology.IRichSpout;
+import org.apache.storm.topology.IRichSpout;
 
 /**
  * This interface represents a spout that emits a finite number of records. 
Common spouts emit infinite streams by

http://git-wip-us.apache.org/repos/asf/flink/blob/475c0b1a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/NullTerminatingSpout.java
--
diff --git 
a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/NullTerminatingSpout.java
 
b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/NullTerminatingSpout.java
index 23d9d70..20e3309 100644
--- 
a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/NullTerminatingSpout.java
+++ 
b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/NullTerminatingSpout.java
@@ -19,10 +19,10 @@ package org.apache.flink.storm.util;
 
 import java.util.Map;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
 
 /**
 * {@link NullTerminatingSpout} is a finite spout (i.e., implements the {@link 
FiniteSpout} interface) that wraps an

http://git-wip-us.apache.org/repos/asf/flink/blob/475c0b1a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/SpoutOutputCollectorObserver.java
--
diff --git 
a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/SpoutOutputCollectorObserver.java
 
b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/SpoutOutputCollectorObserver.java
index b79cc4e..9e222ec 100644
--- 
a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/SpoutOutputCollectorObserver.java
+++ 
b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/SpoutOutputCollectorObserver.java
@@ -19,8 +19,8 @@ package org.apache.flink.storm.util;
 
 import java.util.List;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.utils.Utils;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.utils.Utils;
 
 /**
  * Observes if a call to any {@code emit(...)} or {@code emitDirect(...)} 
method is made.

http://git-wip-us.apache.org/repos/asf/flink/blob/475c0b1a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/StormConfig.java
--
diff --git 
a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/StormConfig.java
 
b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/StormConfig.java
index 38ce58c..040c395 100644
--- 
a/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/StormConfig.java
+++ 
b/flink-contrib/flink-storm/src/main/java/org/apache/flink/storm/util/StormConfig.java
@@ -1,122 +1,122 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.flink.storm.util;
-
-import backtype.storm.Config;
-import org.apache.flink.api.common.ExecutionConfig.GlobalJobParameters;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * {@link StormConfig} is used to provide a user-defined Storm 

[7/7] flink git commit: [FLINK-4959] [docs] Add documentation for ProcessFunction

2017-01-16 Thread sewen
[FLINK-4959] [docs] Add documentation for ProcessFunction


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2eb926f2
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2eb926f2
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2eb926f2

Branch: refs/heads/release-1.2
Commit: 2eb926f2bed5723f160620b94f3b67e5dc418387
Parents: 27c11e1
Author: Stephan Ewen 
Authored: Mon Jan 16 20:17:13 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 21:17:03 2017 +0100

--
 docs/concepts/programming-model.md  |  60 ++--
 docs/concepts/runtime.md|  10 +-
 docs/dev/stream/process_function.md | 230 +++
 docs/fig/levels_of_abstraction.svg  | 193 ++
 4 files changed, 474 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/2eb926f2/docs/concepts/programming-model.md
--
diff --git a/docs/concepts/programming-model.md 
b/docs/concepts/programming-model.md
index 5ab6b8f..3d2aebb 100644
--- a/docs/concepts/programming-model.md
+++ b/docs/concepts/programming-model.md
@@ -27,11 +27,47 @@ under the License.
 * This will be replaced by the TOC
 {:toc}
 
+## Levels of Abstraction
+
+Flink offers different levels of abstraction to develop streaming/batch 
applications.
+
+
+
+  - The lowest level abstraction simply offers **stateful streaming**. It is 
embedded into the [DataStream API](../dev/datastream_api.html)
+via the [Process Function](../dev/stream/process_function.html). It allows 
users to freely process events from one or more streams,
+and use consistent, fault-tolerant *state*. In addition, users can register 
event time and processing time callbacks,
+allowing programs to realize sophisticated computations.
+
+  - In practice, most applications would not need the above described low 
level abstraction, but would instead program against the
+**Core APIs** like the [DataStream API](../dev/datastream_api.html) 
(bounded/unbounded streams) and the [DataSet API](../dev/batch/index.html)
+(bounded data sets). These fluent APIs offer the common building blocks 
for data processing, like various forms of user-specified
+transformations, joins, aggregations, windows, state, etc. Data types 
processed in these APIs are represented as classes
+in the respective programming languages.
+
+The low level *Process Function* integrates with the *DataStream API*, 
making it possible to go to the lower level abstraction 
+for certain operations only. The *DataSet API* offers additional 
primitives on bounded data sets, like loops/iterations.
+
+  - The **Table API** is a declarative DSL centered around *tables*, which may 
be dynamically changing tables (when representing streams).
+The Table API follows the (extended) relational model: Tables have a 
schema attached (similar to tables in relational databases)
+and the API offers comparable operations, such as select, project, join, 
group-by, aggregate, etc.
+Table API programs declaratively define *what logical operation should be 
done* rather than specifying exactly
+   *how the code for the operation looks*. Though the Table API is extensible 
by various types of user-defined
+functions, it is less expressive than the *Core APIs*, but more concise to 
use (less code to write).
+In addition, Table API programs also go through an optimizer that applies 
optimization rules before execution.
+
+One can seamlessly convert between tables and *DataStream*/*DataSet*, 
allowing programs to mix the *Table API* with the *DataStream*
+and *DataSet* APIs.
+
+  - The highest level abstraction offered by Flink is **SQL**. This 
abstraction is similar to the *Table API* both in semantics and
+expressiveness, but represents programs as SQL query expressions.
+The SQL abstraction closely interacts with the Table API, and SQL queries 
can be executed over tables defined in the *Table API*.
+
+
 ## Programs and Dataflows
 
 The basic building blocks of Flink programs are **streams** and 
**transformations**. (Note that the
-DataSets used in Flink's batch API are also streams internally -- more about 
that
-later.) Conceptually a *stream* is a never-ending flow of data records, and a 
*transformation* is an
+DataSets used in Flink's DataSet API are also streams internally -- more about 
that
+later.) Conceptually a *stream* is a (potentially never-ending) flow of data 
records, and a *transformation* is an
 operation that takes one or more streams as input, and produces one or more 
output streams as a
 result.
 
@@ -40,7 +76,7 @@ Each dataflow starts with one or more **sources** and ends in 
one or more **sink
 arbitrary 

[3/7] flink git commit: [FLINK-5485] [webfrontend] Mark compiled web frontend files as binary when processed by git diff

2017-01-16 Thread sewen
[FLINK-5485] [webfrontend] Mark compiled web frontend files as binary when 
processed by git diff

Now that the JavaScript is minified, it is particularly beneficial to mark
compiled web frontend files as binary when processed by git diff.
  https://linux.die.net/man/5/gitattributes

This does not affect how files are displayed by github.

This closes #3122


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6b3c6834
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6b3c6834
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6b3c6834

Branch: refs/heads/release-1.2
Commit: 6b3c683450eb7aee1c9c65be75a0fddb06cea2ce
Parents: 30b467f
Author: Greg Hogan 
Authored: Fri Jan 13 13:33:29 2017 -0500
Committer: Stephan Ewen 
Committed: Mon Jan 16 15:37:39 2017 +0100

--
 .gitattributes | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/6b3c6834/.gitattributes
--
diff --git a/.gitattributes b/.gitattributes
index b68afa6..ecc9cf2 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,3 @@
 *.bat text eol=crlf
-flink-runtime-web/web-dashboard/web/* linguist-vendored
+flink-runtime-web/web-dashboard/web/* linguist-vendored -diff
 



[1/7] flink git commit: [FLINK-5448] [checkpoints] Fix typo in StateAssignmentOperation Exception

2017-01-16 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.2 f4869a66d -> 2eb926f2b


[FLINK-5448] [checkpoints] Fix typo in StateAssignmentOperation Exception

This closes #3097


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/119d39b6
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/119d39b6
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/119d39b6

Branch: refs/heads/release-1.2
Commit: 119d39b6edac6a0a80f90bb07794eca1f31425f7
Parents: f4869a6
Author: mtunique 
Authored: Thu Jan 12 11:55:57 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Jan 16 15:37:25 2017 +0100

--
 .../flink/runtime/checkpoint/StateAssignmentOperation.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/119d39b6/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
index f11f69b..6c23f02 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
@@ -109,8 +109,8 @@ public class StateAssignmentOperation {
if (hasNonPartitionedState && parallelismChanged) {
throw new IllegalStateException("Cannot restore 
the latest checkpoint because " +
"the operator " + 
executionJobVertex.getJobVertexId() + " has non-partitioned " +
-   "state and its parallelism 
changed. The operator" + executionJobVertex.getJobVertexId() +
-   " has parallelism " + 
newParallelism + " whereas the corresponding" +
+   "state and its parallelism 
changed. The operator " + executionJobVertex.getJobVertexId() +
+   " has parallelism " + 
newParallelism + " whereas the corresponding " +
"state object has a parallelism 
of " + oldParallelism);
}
 



[2/7] flink git commit: [FLINK-5438] [streaming api] Typo in JobGraph generator Exception

2017-01-16 Thread sewen
[FLINK-5438] [streaming api] Typo in JobGraph generator Exception

This closes #3098


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/30b467f2
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/30b467f2
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/30b467f2

Branch: refs/heads/release-1.2
Commit: 30b467f266970e267792411d3148b4379ec23439
Parents: 119d39b
Author: mtunique 
Authored: Thu Jan 12 12:02:49 2017 +0800
Committer: Stephan Ewen 
Committed: Mon Jan 16 15:37:33 2017 +0100

--
 .../flink/streaming/api/graph/StreamingJobGraphGenerator.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/30b467f2/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
index 0cb7d9a..1bfaf3f 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
@@ -317,7 +317,7 @@ public class StreamingJobGraphGenerator {
// the parallelism should always be smaller or equal 
than the max parallelism
throw new IllegalStateException("The maximum 
parallelism (" + maxParallelism + ") of " +
"the stream node " + streamNode + " is smaller 
than the parallelism (" +
-   parallelism + "). Increase the maximum 
parallelism or decrease the parallelism of" +
+   parallelism + "). Increase the maximum 
parallelism or decrease the parallelism of " +
"this operator.");
} else {

jobVertex.setMaxParallelism(streamNode.getMaxParallelism());



[6/7] flink git commit: [FLINK-3617] [scala apis] Added null value check.

2017-01-16 Thread sewen
[FLINK-3617] [scala apis] Added null value check.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/27c11e1b
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/27c11e1b
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/27c11e1b

Branch: refs/heads/release-1.2
Commit: 27c11e1b79bd68cbd2e8275c7938478e2e9532e6
Parents: d1b86aa
Author: Aleksandr Chermenin 
Authored: Fri Dec 16 14:42:50 2016 +0300
Committer: Stephan Ewen 
Committed: Mon Jan 16 15:38:15 2017 +0100

--
 .../flink/api/scala/typeutils/CaseClassSerializer.scala  | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/27c11e1b/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
--
diff --git 
a/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
 
b/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
index 625ee80..29b4952 100644
--- 
a/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
+++ 
b/flink-scala/src/main/scala/org/apache/flink/api/scala/typeutils/CaseClassSerializer.scala
@@ -20,7 +20,8 @@ package org.apache.flink.api.scala.typeutils
 import org.apache.flink.annotation.Internal
 import org.apache.flink.api.common.typeutils.TypeSerializer
 import org.apache.flink.api.java.typeutils.runtime.TupleSerializerBase
-import org.apache.flink.core.memory.{DataOutputView, DataInputView}
+import org.apache.flink.core.memory.{DataInputView, DataOutputView}
+import org.apache.flink.types.NullFieldException
 
 /**
  * Serializer for Case Classes. Creation and access is different from
@@ -97,7 +98,13 @@ abstract class CaseClassSerializer[T <: Product](
 var i = 0
 while (i < arity) {
   val serializer = fieldSerializers(i).asInstanceOf[TypeSerializer[Any]]
-  serializer.serialize(value.productElement(i), target)
+  val o = value.productElement(i)
+  try
+serializer.serialize(o, target)
+  catch {
+case e: NullPointerException =>
+  throw new NullFieldException(i, e)
+  }
   i += 1
 }
   }



[4/7] flink git commit: [FLINK-5345] [core] Add a utility to delete directories without failing in the presence of concurrent deletes

2017-01-16 Thread sewen
[FLINK-5345] [core] Add a utility to delete directories without failing in the 
presence of concurrent deletes


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/b1be3f5c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/b1be3f5c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/b1be3f5c

Branch: refs/heads/release-1.2
Commit: b1be3f5c3c9e7410d92c74422b10a6efb42fd4d5
Parents: 6b3c683
Author: Stephan Ewen 
Authored: Wed Jan 11 21:05:57 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 15:37:48 2017 +0100

--
 .../java/org/apache/flink/util/FileUtils.java   | 171 +--
 .../org/apache/flink/util/FileUtilsTest.java| 162 ++
 2 files changed, 319 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/b1be3f5c/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
--
diff --git a/flink-core/src/main/java/org/apache/flink/util/FileUtils.java 
b/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
index 23f5eb9..0d527d5 100644
--- a/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
+++ b/flink-core/src/main/java/org/apache/flink/util/FileUtils.java
@@ -23,27 +23,28 @@ import org.apache.flink.core.fs.FileSystem;
 import org.apache.flink.core.fs.Path;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.nio.file.StandardOpenOption;
 
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
 /**
- * This is a utility class to deal with temporary files.
+ * This is a utility class to deal with files and directories. Contains
+ * utilities for recursive deletion and creation of temporary files.
  */
 public final class FileUtils {
 
-   /**
-    * The alphabet to construct the random part of the filename from.
-    */
-   private static final char[] ALPHABET = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c', 'd', 'e', 'f' };
+   /** The alphabet to construct the random part of the filename from. */
+   private static final char[] ALPHABET =
+           { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c', 'd', 'e', 'f' };
 
-   /**
-* The length of the random part of the filename.
-*/
-   private static final int LENGTH = 12;
+   /** The length of the random part of the filename. */
+   private static final int RANDOM_FILE_NAME_LENGTH = 12;
 
-
+   // ------------------------------------------------------------------------
 
/**
 * Constructs a random filename with the given prefix and
@@ -54,10 +55,9 @@ public final class FileUtils {
 * @return the generated random filename with the given prefix
 */
public static String getRandomFilename(final String prefix) {
-
final StringBuilder stringBuilder = new StringBuilder(prefix);
 
-   for (int i = 0; i < LENGTH; i++) {
+   for (int i = 0; i < RANDOM_FILE_NAME_LENGTH; i++) {
                        stringBuilder.append(ALPHABET[(int) Math.floor(Math.random() * (double) ALPHABET.length)]);
}
 
@@ -87,7 +87,150 @@ public final class FileUtils {
}
 
   // ------------------------------------------------------------------------

-   //  Deleting directories
+   //  Deleting directories on standard File Systems
+   // ------------------------------------------------------------------------
+
+   /**
+    * Removes the given file or directory recursively.
+    *
+    * If the file or directory does not exist, this does not throw an exception, but simply does nothing.
+    * It considers the fact that a file-to-be-deleted is not present a success.
+    *
+    * This method is safe against other concurrent deletion attempts.
+    *
+    * @param file The file or directory to delete.
+    *
+    * @throws IOException Thrown if the directory could not be cleaned for some reason, for example
+    *                     due to missing access/write permissions.
+    */
+   public static void deleteFileOrDirectory(File file) throws IOException {
+   checkNotNull(file, "file");
+
+   if (file.isDirectory()) {
+   // file exists and is directory
+   deleteDirectory(file);
+   }
+   else if (file.exists()) {
+   try {
+   Files.delete(file.toPath());
+   }
+   
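
A usage sketch (not from the commit) of the new deletion semantics; the object
name and the temp-directory prefix are illustrative:

import java.io.File
import java.nio.file.Files

import org.apache.flink.util.FileUtils

object DeleteSketch {
  def main(args: Array[String]): Unit = {
    val dir: File = Files.createTempDirectory("flink-delete-test").toFile

    // The first call removes the directory; the second finds nothing to
    // delete and returns normally, because a file that is already gone
    // counts as successfully deleted.
    FileUtils.deleteFileOrDirectory(dir)
    FileUtils.deleteFileOrDirectory(dir)
  }
}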

[5/7] flink git commit: [FLINK-5345] [core] Migrate various cleanup calls to concurrency-safe directory deletion

2017-01-16 Thread sewen
[FLINK-5345] [core] Migrate various cleanup calls to concurrency-safe directory deletion


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d1b86aab
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d1b86aab
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d1b86aab

Branch: refs/heads/release-1.2
Commit: d1b86aab09061627d8b8c8f99b4277cc60e3dc28
Parents: b1be3f5
Author: Stephan Ewen 
Authored: Thu Jan 12 10:49:13 2017 +0100
Committer: Stephan Ewen 
Committed: Mon Jan 16 15:37:58 2017 +0100

--
 .../contrib/streaming/state/RocksDBKeyedStateBackend.java  | 4 +++-
 .../org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java | 6 +-
 .../src/main/java/org/apache/flink/runtime/blob/BlobCache.java | 3 ++-
 .../main/java/org/apache/flink/runtime/blob/BlobServer.java| 3 ++-
 .../org/apache/flink/runtime/io/disk/iomanager/IOManager.java  | 2 +-
 5 files changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/d1b86aab/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
--
diff --git a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
index 71e2c79..b207af6 100644
--- a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
+++ b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
@@ -17,7 +17,6 @@
 
 package org.apache.flink.contrib.streaming.state;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.flink.api.common.JobID;
 import org.apache.flink.api.common.state.FoldingState;
 import org.apache.flink.api.common.state.FoldingStateDescriptor;
@@ -54,8 +53,10 @@ import org.apache.flink.runtime.state.KeyedBackendSerializationProxy;
 import org.apache.flink.runtime.state.RegisteredBackendStateMetaInfo;
 import org.apache.flink.runtime.state.StreamStateHandle;
 import org.apache.flink.runtime.util.SerializableObject;
+import org.apache.flink.util.FileUtils;
 import org.apache.flink.util.InstantiationUtil;
 import org.apache.flink.util.Preconditions;
+
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.ColumnFamilyOptions;
@@ -65,6 +66,7 @@ import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import org.rocksdb.RocksIterator;
 import org.rocksdb.Snapshot;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/d1b86aab/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
--
diff --git a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
index 3080b57..92c2e36 100644
--- a/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
+++ b/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/WebRuntimeMonitor.java
@@ -19,6 +19,7 @@
 package org.apache.flink.runtime.webmonitor;
 
 import akka.actor.ActorSystem;
+
 import io.netty.bootstrap.ServerBootstrap;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelFuture;
@@ -31,7 +32,7 @@ import io.netty.handler.codec.http.router.Handler;
 import io.netty.handler.codec.http.router.Router;
 import io.netty.handler.ssl.SslHandler;
 import io.netty.handler.stream.ChunkedWriteHandler;
-import org.apache.commons.io.FileUtils;
+
 import org.apache.flink.api.common.time.Time;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
@@ -81,8 +82,11 @@ import org.apache.flink.runtime.webmonitor.metrics.JobMetricsHandler;
 import org.apache.flink.runtime.webmonitor.metrics.JobVertexMetricsHandler;
 import org.apache.flink.runtime.webmonitor.metrics.MetricFetcher;
 import org.apache.flink.runtime.webmonitor.metrics.TaskManagerMetricsHandler;
+import org.apache.flink.util.FileUtils;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import scala.concurrent.ExecutionContext$;
 import scala.concurrent.ExecutionContextExecutor;
 import scala.concurrent.Promise;

http://git-wip-us.apache.org/repos/asf/flink/blob/d1b86aab/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobCache.java
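
The effect of the import swap, sketched for a shutdown-style cleanup. The
object name and directory path are illustrative assumptions; only the
FileUtils import is the point:

import java.io.File

// before: import org.apache.commons.io.FileUtils
// after:  import org.apache.flink.util.FileUtils
import org.apache.flink.util.FileUtils

object ShutdownCleanup {
  def main(args: Array[String]): Unit = {
    val storageDir = new File(System.getProperty("java.io.tmpdir"), "blob-store")
    storageDir.mkdirs()

    // commons-io deleteDirectory fails with an IOException if a concurrent
    // hook removes files underneath it; the flink-core variant from
    // FLINK-5345 tolerates such races.
    FileUtils.deleteDirectory(storageDir)
  }
}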

flink git commit: [FLINK-5585] [jobmanager] Fix NullPointerException in JobManager.updateAccumulators

2017-01-20 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-1.1 931929bf8 -> f6f1c244c


[FLINK-5585] [jobmanager] Fix NullPointerException in JobManager.updateAccumulators


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/f6f1c244
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/f6f1c244
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/f6f1c244

Branch: refs/heads/release-1.1
Commit: f6f1c244cf149d451a32fb3231a6bf1168bc31d1
Parents: 931929b
Author: Stephan Ewen 
Authored: Fri Jan 20 11:12:12 2017 +0100
Committer: Stephan Ewen 
Committed: Fri Jan 20 11:14:35 2017 +0100

--
 .../flink/runtime/jobmanager/JobManager.scala   | 23 ++--
 1 file changed, 12 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/f6f1c244/flink-runtime/src/main/scala/org/apache/flink/runtime/jobmanager/JobManager.scala
--
diff --git a/flink-runtime/src/main/scala/org/apache/flink/runtime/jobmanager/JobManager.scala b/flink-runtime/src/main/scala/org/apache/flink/runtime/jobmanager/JobManager.scala
index d6d23d9..1720d94 100644
--- a/flink-runtime/src/main/scala/org/apache/flink/runtime/jobmanager/JobManager.scala
+++ b/flink-runtime/src/main/scala/org/apache/flink/runtime/jobmanager/JobManager.scala
@@ -1784,18 +1784,19 @@ class JobManager(
    *
    * @param accumulators list of accumulator snapshots
    */
-  private def updateAccumulators(accumulators : Seq[AccumulatorSnapshot]) = {
-    accumulators foreach {
-      case accumulatorEvent =>
-        currentJobs.get(accumulatorEvent.getJobID) match {
-          case Some((jobGraph, jobInfo)) =>
-            future {
-              jobGraph.updateAccumulators(accumulatorEvent)
-            }(context.dispatcher)
-          case None =>
-            // ignore accumulator values for old job
+  private def updateAccumulators(accumulators : Seq[AccumulatorSnapshot]): Unit = {
+    accumulators.foreach( snapshot => {
+      if (snapshot != null) {
+        currentJobs.get(snapshot.getJobID) match {
+          case Some((jobGraph, jobInfo)) =>
+            future {
+              jobGraph.updateAccumulators(snapshot)
+            }(context.dispatcher)
+          case None =>
+            // ignore accumulator values for old job
+        }
       }
-    }
+    })
   }
 
   /**
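
A standalone sketch of the guard pattern applied here, outside the actor
context of the real JobManager (all names hypothetical):

final case class Snapshot(jobId: String)

object AccumulatorGuard {
  def update(snapshots: Seq[Snapshot]): Unit =
    snapshots.foreach { snapshot =>
      // A null snapshot previously caused the NullPointerException fixed
      // by this commit; now it is simply skipped.
      if (snapshot != null) {
        println("updating accumulators for job " + snapshot.jobId)
      }
    }

  def main(args: Array[String]): Unit =
    update(Seq(Snapshot("job-1"), null, Snapshot("job-2")))
}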


