This is an automated email from the ASF dual-hosted git repository.

spmallette pushed a commit to branch driver-35
in repository https://gitbox.apache.org/repos/asf/tinkerpop.git
commit 2de7ee3d2932f780e6ee11b93f12fbef4f7a3c66
Author: stephen <[email protected]>
AuthorDate: Tue Dec 10 07:34:38 2019 -0500

    Fix driver close race condition

    Seemed like on slower systems the close of the connection pool could
    occur before the message was sent and during handshake which would
    generate an odd looking exception.
---
 .../org/apache/tinkerpop/gremlin/driver/Client.java | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/gremlin-driver/src/main/java/org/apache/tinkerpop/gremlin/driver/Client.java b/gremlin-driver/src/main/java/org/apache/tinkerpop/gremlin/driver/Client.java
index d697c95..5b04df5 100644
--- a/gremlin-driver/src/main/java/org/apache/tinkerpop/gremlin/driver/Client.java
+++ b/gremlin-driver/src/main/java/org/apache/tinkerpop/gremlin/driver/Client.java
@@ -781,7 +781,24 @@ public abstract class Client {
             final RequestMessage closeMessage = buildMessage(RequestMessage.build(Tokens.OPS_CLOSE)
                     .addArg(Tokens.ARGS_FORCE, forceClose)).create();
 
-            final CompletableFuture<Void> sessionClose = submitAsync(closeMessage).thenCompose(s -> connectionPool.closeAsync());
+            final CompletableFuture<Void> sessionClose = CompletableFuture.supplyAsync(() -> {
+                try {
+                    // block this up until we get a response from the server or an exception. it might not be accurate
+                    // to wait for maxWaitForSessionClose because we wait that long for this future in calls to close()
+                    // but in either case we don't want to wait longer than that so perhaps this is still a sensible
+                    // wait time - or at least better than something hardcoded. this wait will just expire a bit after
+                    // the close() call's expiration....at least i think that's right.
+                    submitAsync(closeMessage).get(
+                            cluster.connectionPoolSettings().maxWaitForSessionClose, TimeUnit.MILLISECONDS).all().get();
+                } catch (Exception ignored) {
+                    // ignored - if the close message doesn't get to the server it's not a real worry. the server will
+                    // eventually kill the session
+                } finally {
+                    connectionPool.closeAsync();
+                }
+                return null;
+            }, cluster.executor());
+
 
             closing.set(sessionClose);
             return sessionClose;
@@ -802,6 +819,8 @@ public abstract class Client {
                         this.getSessionId());
                 logger.warn(msg, ex);
             } finally {
+                // a bit of an insurance policy for closing down the client side as we do already call this
+                // in closeAsync()
                 connectionPool.closeAsync().join();
             }
         }
