Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/256/
1 tests failed. FAILED: org.apache.solr.cloud.MoveReplicaHDFSTest.testFailedMove Error Message: No live SolrServers available to handle this request:[https://127.0.0.1:52485/solr/MoveReplicaHDFSTest_failed_coll_true, https://127.0.0.1:37237/solr/MoveReplicaHDFSTest_failed_coll_true] Stack Trace: org.apache.solr.client.solrj.SolrServerException: No live SolrServers available to handle this request:[https://127.0.0.1:52485/solr/MoveReplicaHDFSTest_failed_coll_true, https://127.0.0.1:37237/solr/MoveReplicaHDFSTest_failed_coll_true] at __randomizedtesting.SeedInfo.seed([99FDE7D02B778ECA:333034229CA45B1A]:0) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:462) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1106) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:886) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:993) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:819) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.SolrClient.query(SolrClient.java:942) at org.apache.solr.cloud.MoveReplicaTest.testFailedMove(MoveReplicaTest.java:288) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Caused by: org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at https://127.0.0.1:37237/solr/MoveReplicaHDFSTest_failed_coll_true: no servers hosting shard: shard2 at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:643) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:436) ... 46 more Build Log: [...truncated 13967 lines...] 
[junit4] Suite: org.apache.solr.cloud.MoveReplicaHDFSTest [junit4] 2> 1635460 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom [junit4] 2> Creating dataDir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/init-core-data-001 [junit4] 2> 1635460 WARN (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=25 numCloses=25 [junit4] 2> 1635460 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=false [junit4] 2> 1635461 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (false) via: @org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN) [junit4] 2> 1635462 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.c.MiniSolrCloudCluster Starting cluster of 4 servers in /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001 [junit4] 2> 1635462 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER [junit4] 2> 1635481 INFO (Thread-49218) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0 [junit4] 2> 1635481 INFO (Thread-49218) [ ] o.a.s.c.ZkTestServer Starting server [junit4] 2> 1635554 ERROR (Thread-49218) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes [junit4] 2> 1635581 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.c.ZkTestServer 
start zk server on port:57106 [junit4] 2> 1635657 INFO (zkConnectionManagerCallback-3251-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1635667 INFO (jetty-launcher-3248-thread-1) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11 [junit4] 2> 1635673 INFO (jetty-launcher-3248-thread-2) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11 [junit4] 2> 1635704 INFO (jetty-launcher-3248-thread-3) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11 [junit4] 2> 1635792 INFO (jetty-launcher-3248-thread-2) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 1635792 INFO (jetty-launcher-3248-thread-2) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 1635792 INFO (jetty-launcher-3248-thread-2) [ ] o.e.j.s.session node0 Scavenging every 600000ms [junit4] 2> 1635798 INFO (jetty-launcher-3248-thread-2) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@7efcb5{/solr,null,AVAILABLE} [junit4] 2> 1635798 INFO (jetty-launcher-3248-thread-2) [ ] o.e.j.s.AbstractConnector Started ServerConnector@7afed774{SSL,[ssl, http/1.1]}{127.0.0.1:52485} [junit4] 2> 1635799 INFO (jetty-launcher-3248-thread-2) [ ] o.e.j.s.Server Started @1635994ms [junit4] 2> 1635799 INFO (jetty-launcher-3248-thread-2) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=52485} [junit4] 2> 1635799 ERROR (jetty-launcher-3248-thread-2) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. 
[junit4] 2> 1635799 INFO (jetty-launcher-3248-thread-2) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 1635799 INFO (jetty-launcher-3248-thread-2) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0 [junit4] 2> 1635799 INFO (jetty-launcher-3248-thread-2) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 1635799 INFO (jetty-launcher-3248-thread-2) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 1635799 INFO (jetty-launcher-3248-thread-2) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-06T15:37:44.754Z [junit4] 2> 1635912 INFO (jetty-launcher-3248-thread-1) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 1635912 INFO (jetty-launcher-3248-thread-1) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 1635912 INFO (jetty-launcher-3248-thread-1) [ ] o.e.j.s.session node0 Scavenging every 600000ms [junit4] 2> 1635931 INFO (jetty-launcher-3248-thread-1) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@2e48ffbb{/solr,null,AVAILABLE} [junit4] 2> 1636001 INFO (jetty-launcher-3248-thread-3) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 1636001 INFO (jetty-launcher-3248-thread-3) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 1636001 INFO (jetty-launcher-3248-thread-3) [ ] o.e.j.s.session node0 Scavenging every 660000ms [junit4] 2> 1636009 INFO (jetty-launcher-3248-thread-3) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@4ed1f75d{/solr,null,AVAILABLE} [junit4] 2> 1636009 INFO (jetty-launcher-3248-thread-3) [ ] o.e.j.s.AbstractConnector Started ServerConnector@20dde7ef{SSL,[ssl, http/1.1]}{127.0.0.1:48507} [junit4] 2> 1636009 INFO (jetty-launcher-3248-thread-3) [ ] o.e.j.s.Server Started @1636204ms [junit4] 2> 1636009 INFO (jetty-launcher-3248-thread-3) [ ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=48507} [junit4] 2> 1636010 ERROR (jetty-launcher-3248-thread-3) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 1636010 INFO (jetty-launcher-3248-thread-3) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 1636010 INFO (jetty-launcher-3248-thread-3) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0 [junit4] 2> 1636010 INFO (jetty-launcher-3248-thread-3) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 1636010 INFO (jetty-launcher-3248-thread-3) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 1636010 INFO (jetty-launcher-3248-thread-3) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-06T15:37:44.965Z [junit4] 2> 1636038 INFO (jetty-launcher-3248-thread-4) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11 [junit4] 2> 1636156 INFO (jetty-launcher-3248-thread-4) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 1636156 INFO (jetty-launcher-3248-thread-4) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 1636156 INFO (jetty-launcher-3248-thread-4) [ ] o.e.j.s.session node0 Scavenging every 600000ms [junit4] 2> 1636174 INFO (jetty-launcher-3248-thread-4) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@452a3c91{/solr,null,AVAILABLE} [junit4] 2> 1636174 INFO (jetty-launcher-3248-thread-4) [ ] o.e.j.s.AbstractConnector Started ServerConnector@e29403d{SSL,[ssl, http/1.1]}{127.0.0.1:35906} [junit4] 2> 1636174 INFO (jetty-launcher-3248-thread-4) [ ] o.e.j.s.Server Started @1636370ms [junit4] 2> 1636174 INFO (jetty-launcher-3248-thread-4) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, 
hostPort=35906} [junit4] 2> 1636175 ERROR (jetty-launcher-3248-thread-4) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 1636175 INFO (jetty-launcher-3248-thread-4) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 1636175 INFO (jetty-launcher-3248-thread-4) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0 [junit4] 2> 1636175 INFO (jetty-launcher-3248-thread-4) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 1636175 INFO (jetty-launcher-3248-thread-4) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 1636175 INFO (jetty-launcher-3248-thread-4) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-06T15:37:45.130Z [junit4] 2> 1636186 INFO (jetty-launcher-3248-thread-1) [ ] o.e.j.s.AbstractConnector Started ServerConnector@1b3e529c{SSL,[ssl, http/1.1]}{127.0.0.1:37237} [junit4] 2> 1636186 INFO (jetty-launcher-3248-thread-1) [ ] o.e.j.s.Server Started @1636381ms [junit4] 2> 1636186 INFO (jetty-launcher-3248-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=37237} [junit4] 2> 1636186 ERROR (jetty-launcher-3248-thread-1) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 1636186 INFO (jetty-launcher-3248-thread-1) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 1636187 INFO (jetty-launcher-3248-thread-1) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ 
version 7.5.0 [junit4] 2> 1636187 INFO (jetty-launcher-3248-thread-1) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 1636187 INFO (jetty-launcher-3248-thread-1) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 1636187 INFO (jetty-launcher-3248-thread-1) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-06T15:37:45.142Z [junit4] 2> 1636216 INFO (zkConnectionManagerCallback-3253-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1636216 INFO (jetty-launcher-3248-thread-2) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading... [junit4] 2> 1636227 INFO (zkConnectionManagerCallback-3259-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1636245 INFO (jetty-launcher-3248-thread-1) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading... [junit4] 2> 1636256 INFO (zkConnectionManagerCallback-3257-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1636256 INFO (jetty-launcher-3248-thread-4) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading... [junit4] 2> 1636269 INFO (zkConnectionManagerCallback-3255-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1636270 INFO (jetty-launcher-3248-thread-3) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading... 
[junit4] 2> 1636744 INFO (jetty-launcher-3248-thread-1) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:57106/solr [junit4] 2> 1636774 INFO (jetty-launcher-3248-thread-3) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:57106/solr [junit4] 2> 1636794 INFO (zkConnectionManagerCallback-3263-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1636794 INFO (zkConnectionManagerCallback-3267-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1636825 INFO (zkConnectionManagerCallback-3269-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1636828 INFO (zkConnectionManagerCallback-3271-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1636996 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 1636997 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 1636997 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:48507_solr [junit4] 2> 1636999 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:37237_solr [junit4] 2> 1636999 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.c.Overseer Overseer (id=74098605335773192-127.0.0.1:48507_solr-n_0000000000) starting [junit4] 2> 1637015 INFO (zkCallback-3270-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 1637034 INFO (zkCallback-3268-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (1) [junit4] 2> 1637297 INFO (zkConnectionManagerCallback-3281-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1637298 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 1637298 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:57106/solr ready [junit4] 2> 1637312 INFO (zkConnectionManagerCallback-3285-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1637313 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 1637314 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:57106/solr ready [junit4] 2> 1637323 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores [junit4] 2> 1637324 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:48507_solr [junit4] 2> 1637341 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. [junit4] 2> 1637374 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_37237.solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637381 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. [junit4] 2> 1637386 INFO (zkCallback-3268-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(1) -> (2) [junit4] 2> 1637390 INFO (zkCallback-3270-thread-2) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 1637391 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_37237.solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637392 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_37237.solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637397 INFO (jetty-launcher-3248-thread-1) [n:127.0.0.1:37237_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node1/. [junit4] 2> 1637398 INFO (zkCallback-3284-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 1637402 INFO (zkCallback-3280-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(1) -> (2) [junit4] 2> 1637466 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_48507.solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637514 INFO (jetty-launcher-3248-thread-4) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:57106/solr [junit4] 2> 1637546 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_48507.solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637547 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_48507.solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637548 INFO (jetty-launcher-3248-thread-3) [n:127.0.0.1:48507_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node3/. [junit4] 2> 1637572 INFO (jetty-launcher-3248-thread-2) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:57106/solr [junit4] 2> 1637646 INFO (zkConnectionManagerCallback-3292-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1637654 INFO (zkConnectionManagerCallback-3295-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1637680 INFO (zkConnectionManagerCallback-3297-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1637683 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (2) [junit4] 2> 1637684 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 1637686 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores [junit4] 2> 1637686 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:35906_solr [junit4] 2> 1637686 INFO (zkCallback-3270-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 1637687 INFO (zkCallback-3268-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3) [junit4] 2> 1637701 INFO (zkConnectionManagerCallback-3299-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1637705 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3) [junit4] 2> 1637706 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 1637708 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores [junit4] 2> 1637708 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:52485_solr [junit4] 2> 1637708 INFO (zkCallback-3270-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 1637709 INFO (zkCallback-3268-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 1637757 INFO (zkCallback-3284-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (4) [junit4] 2> 1637758 INFO (zkCallback-3280-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(2) -> (4) [junit4] 2> 1637768 INFO (zkCallback-3296-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (4) [junit4] 2> 1637826 INFO (zkCallback-3298-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4) [junit4] 2> 1637830 INFO (zkConnectionManagerCallback-3308-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1637830 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (4) [junit4] 2> 1637831 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:57106/solr ready [junit4] 2> 1637857 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. [junit4] 2> 1637881 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_35906.solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637893 INFO (zkConnectionManagerCallback-3314-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1637894 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (4) [junit4] 2> 1637895 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:57106/solr ready [junit4] 2> 1637903 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_35906.solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637903 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_35906.solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1637904 INFO (jetty-launcher-3248-thread-4) [n:127.0.0.1:35906_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node4/. [junit4] 2> 1637941 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. 
[junit4] 2> 1638000 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52485.solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1638020 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52485.solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1638020 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52485.solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1638021 INFO (jetty-launcher-3248-thread-2) [n:127.0.0.1:52485_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node2/. [junit4] 2> 1638201 INFO (zkConnectionManagerCallback-3317-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1638203 INFO (zkConnectionManagerCallback-3322-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1638204 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (4) [junit4] 2> 1638205 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:57106/solr ready [junit4] 2> 1638229 INFO (qtp1218754706-58645) [n:127.0.0.1:35906_solr ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :overseerstatus with params action=OVERSEERSTATUS&wt=javabin&version=2 and sendToOCPQueue=true [junit4] 2> 1638252 INFO (qtp1218754706-58645) [n:127.0.0.1:35906_solr ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={action=OVERSEERSTATUS&wt=javabin&version=2} status=0 QTime=23 [junit4] 2> 1640253 INFO (OverseerCollectionConfigSetProcessor-74098605335773192-127.0.0.1:48507_solr-n_0000000000) [ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may have disconnected from ZooKeeper [junit4] 2> 1640292 WARN (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.h.u.NativeCodeLoader Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable [junit4] 1> Formatting using clusterid: testClusterID [junit4] 2> 1641957 WARN (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.h.m.i.MetricsConfig Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties [junit4] 2> 1642583 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log Logging to org.apache.logging.slf4j.Log4jLogger@385b31c6 via org.mortbay.log.Slf4jLog [junit4] 2> 1642848 WARN (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 1644792 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 1645048 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log Extract jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs to ./temp/Jetty_localhost_60875_hdfs____.jy9ohm/webapp [junit4] 2> 1646577 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:60875 [junit4] 2> 1652962 WARN (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 1652973 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 1653131 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log Extract jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_43182_datanode____76h81f/webapp [junit4] 2> 1655435 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:43182 [junit4] 2> 1658789 WARN 
(SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 1658791 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 1658884 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log Extract jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_38853_datanode____.4q25hk/webapp [junit4] 2> 1661134 INFO (SUITE-MoveReplicaHDFSTest-seed#[99FDE7D02B778ECA]-worker) [ ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:38853 [junit4] 2> 1663343 ERROR (DataNode: [[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-002/hdfsBaseDir/data/data3/, [DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-002/hdfsBaseDir/data/data4/]] heartbeating to localhost/127.0.0.1:34742) [ ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. Assuming default value of 1000 [junit4] 2> 1663357 ERROR (DataNode: [[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-002/hdfsBaseDir/data/data1/, [DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-002/hdfsBaseDir/data/data2/]] heartbeating to localhost/127.0.0.1:34742) [ ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. 
Assuming default value of 1000 [junit4] 2> 1664670 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x6ea4bf8afe25b9: from storage DS-c755faf1-5a1f-44e2-af9e-6de79e079ebb node DatanodeRegistration(127.0.0.1:53005, datanodeUuid=84ad3de6-de29-4dcc-a96c-b382ff1bc742, infoPort=41197, infoSecurePort=0, ipcPort=50194, storageInfo=lv=-56;cid=testClusterID;nsid=891737923;c=0), blocks: 0, hasStaleStorage: true, processing time: 2 msecs [junit4] 2> 1664670 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x6ea4bf8afe25b9: from storage DS-edb86a84-e53b-46d7-b195-22bd679a209b node DatanodeRegistration(127.0.0.1:53005, datanodeUuid=84ad3de6-de29-4dcc-a96c-b382ff1bc742, infoPort=41197, infoSecurePort=0, ipcPort=50194, storageInfo=lv=-56;cid=testClusterID;nsid=891737923;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs [junit4] 2> 1664721 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x6ea4bf8b3f829e: from storage DS-5e4d9747-0c00-4a27-a4f8-99ff78906e85 node DatanodeRegistration(127.0.0.1:46971, datanodeUuid=4820458e-9186-472c-a307-f09ab4239f27, infoPort=56408, infoSecurePort=0, ipcPort=41377, storageInfo=lv=-56;cid=testClusterID;nsid=891737923;c=0), blocks: 0, hasStaleStorage: true, processing time: 0 msecs [junit4] 2> 1664721 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x6ea4bf8b3f829e: from storage DS-b301765c-e7e0-4810-92fb-e23e1c8677dc node DatanodeRegistration(127.0.0.1:46971, datanodeUuid=4820458e-9186-472c-a307-f09ab4239f27, infoPort=56408, infoSecurePort=0, ipcPort=41377, storageInfo=lv=-56;cid=testClusterID;nsid=891737923;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs [junit4] 2> 1665983 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.SolrTestCaseJ4 ###Starting testFailedMove [junit4] 2> 1665984 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.c.c.ZkStateReader Updated live 
nodes from ZooKeeper... (0) -> (4) [junit4] 2> 1665993 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11 [junit4] 2> 1666052 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0 [junit4] 2> 1666052 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.e.j.s.session No SessionScavenger set, using defaults [junit4] 2> 1666052 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.e.j.s.session node0 Scavenging every 600000ms [junit4] 2> 1666053 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@30d34b93{/solr,null,AVAILABLE} [junit4] 2> 1666054 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@4f7c4277{SSL,[ssl, http/1.1]}{127.0.0.1:54092} [junit4] 2> 1666054 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.e.j.s.Server Started @1666249ms [junit4] 2> 1666054 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=54092} [junit4] 2> 1666054 ERROR (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 1666054 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory [junit4] 2> 1666054 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? 
version 7.5.0 [junit4] 2> 1666054 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 1666054 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null [junit4] 2> 1666054 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-06T15:38:15.009Z [junit4] 2> 1666073 INFO (zkConnectionManagerCallback-3326-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1666074 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading... [junit4] 2> 1666422 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:57106/solr [junit4] 2> 1666427 INFO (zkConnectionManagerCallback-3330-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1666428 INFO (zkConnectionManagerCallback-3332-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1666440 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (4) [junit4] 2> 1666441 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 1666442 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores [junit4] 2> 1666442 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:54092_solr [junit4] 2> 1666444 INFO (zkCallback-3296-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666444 INFO (zkCallback-3313-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666444 INFO (zkCallback-3298-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666444 INFO (zkCallback-3280-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666444 INFO (zkCallback-3284-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666444 INFO (zkCallback-3307-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666444 INFO (zkCallback-3268-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666444 INFO (zkCallback-3321-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666444 INFO (zkCallback-3321-thread-2) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666467 INFO (zkCallback-3270-thread-2) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (5) [junit4] 2> 1666467 INFO (zkCallback-3331-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(4) -> (5) [junit4] 2> 1666490 INFO (zkConnectionManagerCallback-3339-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1666492 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (5) [junit4] 2> 1666493 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:57106/solr ready [junit4] 2> 1666494 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory. [junit4] 2> 1666600 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_54092.solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1666836 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_54092.solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1666837 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_54092.solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1666838 INFO (TEST-MoveReplicaHDFSTest.testFailedMove-seed#[99FDE7D02B778ECA]) [n:127.0.0.1:54092_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node5/. 
[junit4] 2> 1667146 INFO (zkConnectionManagerCallback-3342-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected [junit4] 2> 1667150 INFO (qtp1218754706-58641) [n:127.0.0.1:35906_solr ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&autoAddReplicas=false&name=MoveReplicaHDFSTest_failed_coll_true&nrtReplicas=2&action=CREATE&numShards=2&wt=javabin&version=2 and sendToOCPQueue=true [junit4] 2> 1667312 INFO (OverseerThreadFactory-5601-thread-2) [ ] o.a.s.c.a.c.CreateCollectionCmd Create collection MoveReplicaHDFSTest_failed_coll_true [junit4] 2> 1667427 INFO (OverseerStateUpdate-74098605335773192-127.0.0.1:48507_solr-n_0000000000) [ ] o.a.s.c.o.SliceMutator createReplica() { [junit4] 2> "operation":"ADDREPLICA", [junit4] 2> "collection":"MoveReplicaHDFSTest_failed_coll_true", [junit4] 2> "shard":"shard1", [junit4] 2> "core":"MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1", [junit4] 2> "state":"down", [junit4] 2> "base_url":"https://127.0.0.1:37237/solr", [junit4] 2> "type":"NRT", [junit4] 2> "waitForFinalState":"false"} [junit4] 2> 1667429 INFO (OverseerStateUpdate-74098605335773192-127.0.0.1:48507_solr-n_0000000000) [ ] o.a.s.c.o.SliceMutator createReplica() { [junit4] 2> "operation":"ADDREPLICA", [junit4] 2> "collection":"MoveReplicaHDFSTest_failed_coll_true", [junit4] 2> "shard":"shard1", [junit4] 2> "core":"MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3", [junit4] 2> "state":"down", [junit4] 2> "base_url":"https://127.0.0.1:52485/solr", [junit4] 2> "type":"NRT", [junit4] 2> "waitForFinalState":"false"} [junit4] 2> 1667431 INFO (OverseerStateUpdate-74098605335773192-127.0.0.1:48507_solr-n_0000000000) [ ] o.a.s.c.o.SliceMutator createReplica() { [junit4] 2> "operation":"ADDREPLICA", [junit4] 2> "collection":"MoveReplicaHDFSTest_failed_coll_true", [junit4] 2> "shard":"shard2", [junit4] 2> "core":"MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5", [junit4] 2> "state":"down", 
[junit4] 2> "base_url":"https://127.0.0.1:48507/solr", [junit4] 2> "type":"NRT", [junit4] 2> "waitForFinalState":"false"} [junit4] 2> 1667432 INFO (OverseerStateUpdate-74098605335773192-127.0.0.1:48507_solr-n_0000000000) [ ] o.a.s.c.o.SliceMutator createReplica() { [junit4] 2> "operation":"ADDREPLICA", [junit4] 2> "collection":"MoveReplicaHDFSTest_failed_coll_true", [junit4] 2> "shard":"shard2", [junit4] 2> "core":"MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7", [junit4] 2> "state":"down", [junit4] 2> "base_url":"https://127.0.0.1:35906/solr", [junit4] 2> "type":"NRT", [junit4] 2> "waitForFinalState":"false"} [junit4] 2> 1667747 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&coreNodeName=core_node8&collection.configName=conf1&newCollection=true&name=MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7&action=CREATE&numShards=2&collection=MoveReplicaHDFSTest_failed_coll_true&shard=shard2&wt=javabin&version=2&replicaType=NRT [junit4] 2> 1667853 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&coreNodeName=core_node2&collection.configName=conf1&newCollection=true&name=MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1&action=CREATE&numShards=2&collection=MoveReplicaHDFSTest_failed_coll_true&shard=shard1&wt=javabin&version=2&replicaType=NRT [junit4] 2> 1667853 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores [junit4] 2> 1667855 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&coreNodeName=core_node4&collection.configName=conf1&newCollection=true&name=MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3&action=CREATE&numShards=2&collection=MoveReplicaHDFSTest_failed_coll_true&shard=shard1&wt=javabin&version=2&replicaType=NRT [junit4] 2> 1667893 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&coreNodeName=core_node6&collection.configName=conf1&newCollection=true&name=MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5&action=CREATE&numShards=2&collection=MoveReplicaHDFSTest_failed_coll_true&shard=shard2&wt=javabin&version=2&replicaType=NRT [junit4] 2> 1668776 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0 [junit4] 2> 1668783 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.s.IndexSchema [MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] Schema name=minimal [junit4] 2> 1668785 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id [junit4] 2> 1668785 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.CoreContainer Creating SolrCore 'MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7' using configuration from collection MoveReplicaHDFSTest_failed_coll_true, trusted=true [junit4] 2> 1668786 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true 
s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_35906.solr.core.MoveReplicaHDFSTest_failed_coll_true.shard2.replica_n7' (registry 'solr.core.MoveReplicaHDFSTest_failed_coll_true.shard2.replica_n7') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1668808 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.HdfsDirectoryFactory solr.hdfs.home=hdfs://localhost:34742/data [junit4] 2> 1668809 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled [junit4] 2> 1668809 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 1668809 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.SolrCore [[MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node4/MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7], dataDir=[hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node8/data/] [junit4] 2> 1668812 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.HdfsDirectoryFactory creating directory factory for path 
hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node8/data/snapshot_metadata [junit4] 2> 1668873 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0 [junit4] 2> 1668888 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.s.IndexSchema [MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] Schema name=minimal [junit4] 2> 1668890 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id [junit4] 2> 1668890 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1' using configuration from collection MoveReplicaHDFSTest_failed_coll_true, trusted=true [junit4] 2> 1668891 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_37237.solr.core.MoveReplicaHDFSTest_failed_coll_true.shard1.replica_n1' (registry 'solr.core.MoveReplicaHDFSTest_failed_coll_true.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1668891 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory solr.hdfs.home=hdfs://localhost:34742/data [junit4] 2> 
1668891 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled [junit4] 2> 1668891 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 1668905 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.SolrCore [[MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node1/MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1], dataDir=[hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node2/data/] [junit4] 2> 1668906 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node2/data/snapshot_metadata [junit4] 2> 1668921 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0 [junit4] 2> 1668919 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0 [junit4] 2> 1669019 INFO (qtp1350213693-58630) 
[n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.s.IndexSchema [MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] Schema name=minimal [junit4] 2> 1669021 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id [junit4] 2> 1669021 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.CoreContainer Creating SolrCore 'MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5' using configuration from collection MoveReplicaHDFSTest_failed_coll_true, trusted=true [junit4] 2> 1669021 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_48507.solr.core.MoveReplicaHDFSTest_failed_coll_true.shard2.replica_n5' (registry 'solr.core.MoveReplicaHDFSTest_failed_coll_true.shard2.replica_n5') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1669022 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.HdfsDirectoryFactory solr.hdfs.home=hdfs://localhost:34742/data [junit4] 2> 1669022 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled [junit4] 2> 1669022 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 
x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 1669022 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.SolrCore [[MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node3/MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5], dataDir=[hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node6/data/] [junit4] 2> 1669023 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node6/data/snapshot_metadata [junit4] 2> 1669026 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.s.IndexSchema [MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] Schema name=minimal [junit4] 2> 1669028 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id [junit4] 2> 1669028 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.CoreContainer Creating SolrCore 'MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3' using configuration from collection MoveReplicaHDFSTest_failed_coll_true, trusted=true 
[junit4] 2> 1669028 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52485.solr.core.MoveReplicaHDFSTest_failed_coll_true.shard1.replica_n3' (registry 'solr.core.MoveReplicaHDFSTest_failed_coll_true.shard1.replica_n3') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3a195aad [junit4] 2> 1669028 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.HdfsDirectoryFactory solr.hdfs.home=hdfs://localhost:34742/data [junit4] 2> 1669028 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled [junit4] 2> 1669029 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 1669029 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.SolrCore [[MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.MoveReplicaHDFSTest_99FDE7D02B778ECA-001/tempDir-001/node2/MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3], dataDir=[hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node4/data/] [junit4] 2> 1669034 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 
x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node4/data/snapshot_metadata [junit4] 2> 1669145 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node6/data [junit4] 2> 1669149 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node8/data [junit4] 2> 1669152 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node2/data [junit4] 2> 1669163 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node4/data [junit4] 2> 1669473 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node6/data/index [junit4] 2> 1669474 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr 
c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node4/data/index [junit4] 2> 1669476 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node8/data/index [junit4] 2> 1669487 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.HdfsDirectoryFactory creating directory factory for path hdfs://localhost:34742/data/MoveReplicaHDFSTest_failed_coll_true/core_node2/data/index [junit4] 2> 1670937 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:53005 is added to blk_1073741828_1004{UCState=COMMITTED, truncateBlock=null, primaryNodeIndex=-1, replicas=[ReplicaUC[[DISK]DS-b301765c-e7e0-4810-92fb-e23e1c8677dc:NORMAL:127.0.0.1:46971|RBW], ReplicaUC[[DISK]DS-c755faf1-5a1f-44e2-af9e-6de79e079ebb:NORMAL:127.0.0.1:53005|RBW]]} size 69 [junit4] 2> 1670939 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:53005 is added to blk_1073741825_1001{UCState=UNDER_CONSTRUCTION, truncateBlock=null, primaryNodeIndex=-1, replicas=[ReplicaUC[[DISK]DS-5e4d9747-0c00-4a27-a4f8-99ff78906e85:NORMAL:127.0.0.1:46971|RBW], ReplicaUC[[DISK]DS-c755faf1-5a1f-44e2-af9e-6de79e079ebb:NORMAL:127.0.0.1:53005|RBW]]} size 0 [junit4] 2> 1670939 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:53005 is added to blk_1073741826_1002{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, replicas=[ReplicaUC[[DISK]DS-5e4d9747-0c00-4a27-a4f8-99ff78906e85:NORMAL:127.0.0.1:46971|RBW], ReplicaUC[[DISK]DS-c755faf1-5a1f-44e2-af9e-6de79e079ebb:NORMAL:127.0.0.1:53005|RBW]]} size 0 [junit4] 2> 1670940 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:46971 is added to blk_1073741828_1004 size 69 [junit4] 2> 1670949 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:46971 is added to blk_1073741825_1001 size 69 [junit4] 2> 1670951 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:46971 is added to blk_1073741826_1002 size 69 [junit4] 2> 1670973 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:53005 is added to blk_1073741827_1003{UCState=UNDER_CONSTRUCTION, truncateBlock=null, primaryNodeIndex=-1, replicas=[ReplicaUC[[DISK]DS-b301765c-e7e0-4810-92fb-e23e1c8677dc:NORMAL:127.0.0.1:46971|RBW], ReplicaUC[[DISK]DS-edb86a84-e53b-46d7-b195-22bd679a209b:NORMAL:127.0.0.1:53005|RBW]]} size 0 [junit4] 2> 1670987 INFO (Block report processor) [ ] BlockStateChange BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:46971 is added to blk_1073741827_1003{UCState=UNDER_CONSTRUCTION, truncateBlock=null, primaryNodeIndex=-1, replicas=[ReplicaUC[[DISK]DS-b301765c-e7e0-4810-92fb-e23e1c8677dc:NORMAL:127.0.0.1:46971|RBW], ReplicaUC[[DISK]DS-edb86a84-e53b-46d7-b195-22bd679a209b:NORMAL:127.0.0.1:53005|RBW]]} size 0 [junit4] 2> 1671381 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.HdfsUpdateLog [junit4] 2> 1671381 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 
x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.HdfsUpdateLog [junit4] 2> 1671381 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 1671381 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.u.HdfsUpdateLog Initializing HdfsUpdateLog: tlogDfsReplication=3 [junit4] 2> 1671386 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 1671386 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.u.HdfsUpdateLog Initializing HdfsUpdateLog: tlogDfsReplication=3 [junit4] 2> 1671412 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 1671412 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 1671417 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 
x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.HdfsUpdateLog [junit4] 2> 1671417 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 1671417 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.u.HdfsUpdateLog Initializing HdfsUpdateLog: tlogDfsReplication=3 [junit4] 2> 1671440 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 1671440 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 1671456 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 1671456 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 1671893 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.HdfsUpdateLog [junit4] 2> 1671893 
INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 1671893 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.u.HdfsUpdateLog Initializing HdfsUpdateLog: tlogDfsReplication=3 [junit4] 2> 1671918 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@5ef95593[MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] main] [junit4] 2> 1671922 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.s.SolrIndexSearcher Opening [Searcher@7f926a97[MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] main] [junit4] 2> 1671923 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 1671923 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 1671929 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits 
will be reserved for 10000ms. [junit4] 2> 1671938 INFO (searcherExecutor-5632-thread-1-processing-n:127.0.0.1:37237_solr x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1 c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.SolrCore [MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] Registered new searcher Searcher@5ef95593[MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 1671938 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 1671939 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 1671940 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms. 
[junit4] 2> 1671941 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1605256086443524096 [junit4] 2> 1671942 INFO (searcherExecutor-5634-thread-1-processing-n:127.0.0.1:52485_solr x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3 c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.SolrCore [MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] Registered new searcher Searcher@7f926a97[MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 1671948 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1605256086450864128 [junit4] 2> 1671949 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.ZkShardTerms Successful update of terms at /collections/MoveReplicaHDFSTest_failed_coll_true/terms/shard1 to Terms{values={core_node4=0}, version=0} [junit4] 2> 1671952 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.ShardLeaderElectionContext Waiting until we see more replicas up for shard shard1: total=2 found=1 timeoutin=9999ms [junit4] 2> 1671977 INFO (qtp1936336053-58621) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 
x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/MoveReplicaHDFSTest_failed_coll_true/terms/shard1 to Terms{values={core_node2=0, core_node4=0}, version=1} [junit4] 2> 1671986 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 1671986 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 1671993 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.s.SolrIndexSearcher Opening [Searcher@4b4ff2d7[MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] main] [junit4] 2> 1671994 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 1671994 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 1671995 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms. 
[junit4] 2> 1671996 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1605256086501195776 [junit4] 2> 1671998 INFO (searcherExecutor-5631-thread-1-processing-n:127.0.0.1:35906_solr x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7 c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.SolrCore [MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] Registered new searcher Searcher@4b4ff2d7[MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 1672002 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.ZkShardTerms Successful update of terms at /collections/MoveReplicaHDFSTest_failed_coll_true/terms/shard2 to Terms{values={core_node8=0}, version=0} [junit4] 2> 1672005 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.ShardLeaderElectionContext Waiting until we see more replicas up for shard shard2: total=2 found=1 timeoutin=9999ms [junit4] 2> 1672045 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.s.SolrIndexSearcher Opening [Searcher@72552a2c[MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] main] [junit4] 2> 1672046 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 
x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 1672046 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 1672047 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms. [junit4] 2> 1672047 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1605256086554673152 [junit4] 2> 1672054 INFO (searcherExecutor-5633-thread-1-processing-n:127.0.0.1:48507_solr x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5 c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.SolrCore [MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] Registered new searcher Searcher@72552a2c[MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 1672056 INFO (qtp1350213693-58630) [n:127.0.0.1:48507_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node6 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n5] o.a.s.c.ZkShardTerms Successful update of terms at /collections/MoveReplicaHDFSTest_failed_coll_true/terms/shard2 to Terms{values={core_node6=0, core_node8=0}, version=1} [junit4] 2> 1672454 INFO 
(qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue. [junit4] 2> 1672454 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync [junit4] 2> 1672454 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:52485/solr/MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3/ [junit4] 2> 1672454 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.u.PeerSync PeerSync: core=MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3 url=https://127.0.0.1:52485/solr START replicas=[https://127.0.0.1:37237/solr/MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1/] nUpdates=100 [junit4] 2> 1672457 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.u.PeerSync PeerSync: core=MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3 url=https://127.0.0.1:52485/solr DONE. We have no versions. sync failed. 
[junit4] 2> 1672468 INFO (qtp1936336053-58620) [n:127.0.0.1:37237_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node2 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] o.a.s.c.S.Request [MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n1] webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=0 [junit4] 2> 1672469 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate [junit4] 2> 1672469 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway [junit4] 2> 1672469 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR [junit4] 2> 1672471 INFO (qtp1626468552-58610) [n:127.0.0.1:52485_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard1 r:core_node4 x:MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:52485/solr/MoveReplicaHDFSTest_failed_coll_true_shard1_replica_n3/ shard1 [junit4] 2> 1672506 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue. 
[junit4] 2> 1672506 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync [junit4] 2> 1672506 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:35906/solr/MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7/ [junit4] 2> 1672506 INFO (qtp1218754706-58640) [n:127.0.0.1:35906_solr c:MoveReplicaHDFSTest_failed_coll_true s:shard2 r:core_node8 x:MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7] o.a.s.u.PeerSync PeerSync: core=MoveReplicaHDFSTest_failed_coll_true_shard2_replica_n7 url= [...truncated too long message...] e/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/lucene/top-level-ivy-settings.xml resolve: jar-checksums: [mkdir] Created dir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/null311791968 [copy] Copying 247 files to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/null311791968 [delete] Deleting directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/checkout/solr/null311791968 check-working-copy: [ivy:cachepath] :: resolving dependencies :: org.eclipse.jgit#org.eclipse.jgit-caller;working [ivy:cachepath] confs: [default] [ivy:cachepath] found org.eclipse.jgit#org.eclipse.jgit;4.6.0.201612231935-r in public [ivy:cachepath] found com.jcraft#jsch;0.1.53 in public [ivy:cachepath] found com.googlecode.javaewah#JavaEWAH;1.1.6 in public [ivy:cachepath] found org.apache.httpcomponents#httpclient;4.3.6 in public [ivy:cachepath] found org.apache.httpcomponents#httpcore;4.3.3 in public [ivy:cachepath] found commons-logging#commons-logging;1.1.3 in public [ivy:cachepath] found commons-codec#commons-codec;1.6 in public [ivy:cachepath] found org.slf4j#slf4j-api;1.7.2 in public [ivy:cachepath] :: resolution report :: resolve 256ms :: artifacts dl 12ms --------------------------------------------------------------------- | | modules || artifacts | | conf | number| search|dwnlded|evicted|| number|dwnlded| --------------------------------------------------------------------- | default | 8 | 0 | 0 | 0 || 8 | 0 | --------------------------------------------------------------------- [wc-checker] Initializing working copy... [wc-checker] SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder". 
[wc-checker] SLF4J: Defaulting to no-operation (NOP) logger implementation [wc-checker] SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details. [wc-checker] Checking working copy status... -jenkins-base: BUILD SUCCESSFUL Total time: 363 minutes 33 seconds Archiving artifacts java.lang.InterruptedException: no matches found within 10000 at hudson.FilePath$34.hasMatch(FilePath.java:2678) at hudson.FilePath$34.invoke(FilePath.java:2557) at hudson.FilePath$34.invoke(FilePath.java:2547) at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2918) Also: hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741) at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357) at hudson.remoting.Channel.call(Channel.java:955) at hudson.FilePath.act(FilePath.java:1036) at hudson.FilePath.act(FilePath.java:1025) at hudson.FilePath.validateAntFileMask(FilePath.java:2547) at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243) at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81) at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20) at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744) at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690) at hudson.model.Build$BuildExecution.post2(Build.java:186) at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635) at hudson.model.Run.execute(Run.java:1819) at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43) at hudson.model.ResourceController.execute(ResourceController.java:97) at hudson.model.Executor.run(Executor.java:429) Caused: hudson.FilePath$TunneledInterruptedException at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2920) at hudson.remoting.UserRequest.perform(UserRequest.java:212) at 
hudson.remoting.UserRequest.perform(UserRequest.java:54) at hudson.remoting.Request$2.run(Request.java:369) at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:748) Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000 at hudson.FilePath.act(FilePath.java:1038) at hudson.FilePath.act(FilePath.java:1025) at hudson.FilePath.validateAntFileMask(FilePath.java:2547) at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243) at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81) at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20) at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744) at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690) at hudson.model.Build$BuildExecution.post2(Build.java:186) at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635) at hudson.model.Run.execute(Run.java:1819) at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43) at hudson.model.ResourceController.execute(ResourceController.java:97) at hudson.model.Executor.run(Executor.java:429) No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error? Recording test results Build step 'Publish JUnit test result report' changed build result to UNSTABLE Email was triggered for: Unstable (Test Failures) Sending email for trigger: Unstable (Test Failures)
--------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
