Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.0/128/
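The failure below comes from AbstractCloudBackupRestoreTestCase.testBackupAndRestore (line 275): the restore request sent through CloudSolrClient is answered by the Collections API with "Could not restore core", the error the collection-level RESTORE surfaces when one of the underlying core-level restore operations fails. For orientation, a minimal SolrJ sketch of the backup-then-restore sequence that test exercises; the ZooKeeper address, collection, backup name, repository and location are placeholders lifted from the log below, not the test's literal code.

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class BackupRestoreSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder ZK address; the test talks to its MiniSolrCloudCluster's own client.
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("127.0.0.1:46742/solr").build()) {

          // Back up the collection into the "hdfs" repository under /backup.
          CollectionAdminRequest.backupCollection("hdfsbackuprestore", "mytestbackup")
              .setRepositoryName("hdfs")
              .setLocation("/backup")
              .process(client);

          // Restore from that backup into a new collection; this is the request
          // that fails in the stack trace below with "Could not restore core".
          CollectionAdminRequest.restoreCollection("hdfsbackuprestore_restored", "mytestbackup")
              .setRepositoryName("hdfs")
              .setLocation("/backup")
              .process(client);
        }
      }
    }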
1 tests failed.
FAILED: org.apache.solr.cloud.TestHdfsCloudBackupRestore.test

Error Message:
Error from server at https://127.0.0.1:44216/solr: Could not restore core

Stack Trace:
org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at https://127.0.0.1:44216/solr: Could not restore core
    at __randomizedtesting.SeedInfo.seed([259AE5AA5F7E8D83:ADCEDA70F182E07B]:0)
    at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:627)
    at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:253)
    at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:242)
    at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483)
    at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413)
    at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1121)
    at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:862)
    at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:793)
    at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:178)
    at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:195)
    at org.apache.solr.cloud.AbstractCloudBackupRestoreTestCase.testBackupAndRestore(AbstractCloudBackupRestoreTestCase.java:275)
    at org.apache.solr.cloud.AbstractCloudBackupRestoreTestCase.test(AbstractCloudBackupRestoreTestCase.java:136)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at java.lang.Thread.run(Thread.java:748)

Build Log:
[...truncated 11175 lines...]
[junit4] Suite: org.apache.solr.cloud.TestHdfsCloudBackupRestore
[junit4] 2> Creating dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/init-core-data-001
[junit4] 2> 1114598 WARN (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=1 numCloses=1
[junit4] 2> 1114619 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=false
[junit4] 2> 1114620 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (false) via: @org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
[junit4] 2> 1118587 WARN (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.h.u.NativeCodeLoader Unable to load native-hadoop library for your platform...
using builtin-java classes where applicable [junit4] 1> Formatting using clusterid: testClusterID [junit4] 2> 1122670 WARN (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.h.m.i.MetricsConfig Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties [junit4] 2> 1123409 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog [junit4] 2> 1123509 WARN (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 1125016 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 1125243 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs to ./temp/Jetty_lucene2.us.west_apache_org_34435_hdfs____yxbrpb/webapp [junit4] 2> 1128187 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log Started HttpServer2$selectchannelconnectorwithsafestar...@lucene2-us-west.apache.org:34435 [junit4] 2> 1134895 WARN (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 1134899 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 1135061 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_42149_datanode____f3mqe7/webapp [junit4] 2> 1136836 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:42149 [junit4] 2> 1140207 WARN (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j [junit4] 2> 1140208 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log jetty-6.1.26 [junit4] 2> 1140407 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_33055_datanode____.tlv9ux/webapp [junit4] 2> 1142531 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:33055 [junit4] 2> 1147356 ERROR (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-001/hdfsBaseDir/data/data3/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-001/hdfsBaseDir/data/data4/]] heartbeating to lucene2-us-west.apache.org/127.0.0.1:41502) [ ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. 
Assuming default value of 1000 [junit4] 2> 1147357 ERROR (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-001/hdfsBaseDir/data/data1/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-001/hdfsBaseDir/data/data2/]] heartbeating to lucene2-us-west.apache.org/127.0.0.1:41502) [ ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. Assuming default value of 1000 [junit4] 2> 1149382 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x119316c4547659: from storage DS-0d4b0695-cc7a-480f-8b08-20a9548d51b4 node DatanodeRegistration(127.0.0.1:34041, datanodeUuid=b9034229-d4b4-4659-8f4d-daf6c20c98ca, infoPort=34200, infoSecurePort=0, ipcPort=43406, storageInfo=lv=-56;cid=testClusterID;nsid=1039319792;c=0), blocks: 0, hasStaleStorage: true, processing time: 52 msecs [junit4] 2> 1149382 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x119316c37c5fa3: from storage DS-24375fcd-396e-4197-8084-0bd7605e3a02 node DatanodeRegistration(127.0.0.1:33332, datanodeUuid=f69f869b-b626-4231-a304-d4d0d72a1ac8, infoPort=38392, infoSecurePort=0, ipcPort=42283, storageInfo=lv=-56;cid=testClusterID;nsid=1039319792;c=0), blocks: 0, hasStaleStorage: true, processing time: 1 msecs [junit4] 2> 1149387 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x119316c4547659: from storage DS-5aabddfa-0089-4833-b3eb-ed353be7b23d node DatanodeRegistration(127.0.0.1:34041, datanodeUuid=b9034229-d4b4-4659-8f4d-daf6c20c98ca, infoPort=34200, infoSecurePort=0, ipcPort=43406, storageInfo=lv=-56;cid=testClusterID;nsid=1039319792;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs [junit4] 2> 1149387 INFO (Block report processor) [ ] BlockStateChange BLOCK* processReport 0x119316c37c5fa3: from storage DS-7ec3b893-f359-4431-adf3-2c77bac5a4bf node DatanodeRegistration(127.0.0.1:33332, datanodeUuid=f69f869b-b626-4231-a304-d4d0d72a1ac8, infoPort=38392, infoSecurePort=0, ipcPort=42283, storageInfo=lv=-56;cid=testClusterID;nsid=1039319792;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs [junit4] 2> 1151494 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002 [junit4] 2> 1151494 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER [junit4] 2> 1151513 INFO (Thread-486) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0 [junit4] 2> 1151513 INFO (Thread-486) [ ] o.a.s.c.ZkTestServer Starting server [junit4] 2> 1151529 ERROR (Thread-486) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes [junit4] 2> 1151613 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.c.ZkTestServer start zk server on port:46742 [junit4] 2> 1151913 INFO (jetty-launcher-325-thread-2) [ ] o.e.j.s.Server jetty-9.3.14.v20161028 [junit4] 2> 1151944 INFO (jetty-launcher-325-thread-1) [ ] o.e.j.s.Server jetty-9.3.14.v20161028 [junit4] 2> 
1152009 INFO (jetty-launcher-325-thread-1) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@5c312b53{/solr,null,AVAILABLE} [junit4] 2> 1152009 INFO (jetty-launcher-325-thread-1) [ ] o.e.j.s.AbstractConnector Started ServerConnector@7c85f183{SSL,[ssl, http/1.1]}{127.0.0.1:40618} [junit4] 2> 1152009 INFO (jetty-launcher-325-thread-1) [ ] o.e.j.s.Server Started @1166245ms [junit4] 2> 1152009 INFO (jetty-launcher-325-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=40618} [junit4] 2> 1152010 ERROR (jetty-launcher-325-thread-1) [ ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 1152010 INFO (jetty-launcher-325-thread-1) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.0.0 [junit4] 2> 1152010 INFO (jetty-launcher-325-thread-1) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 1152010 INFO (jetty-launcher-325-thread-1) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config dir: null [junit4] 2> 1152010 INFO (jetty-launcher-325-thread-1) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2017-09-08T02:24:54.139Z [junit4] 2> 1152192 INFO (jetty-launcher-325-thread-2) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@aeb2a13{/solr,null,AVAILABLE} [junit4] 2> 1152192 INFO (jetty-launcher-325-thread-2) [ ] o.e.j.s.AbstractConnector Started ServerConnector@40b9679e{SSL,[ssl, http/1.1]}{127.0.0.1:44216} [junit4] 2> 1152193 INFO (jetty-launcher-325-thread-2) [ ] o.e.j.s.Server Started @1166428ms [junit4] 2> 1152193 INFO (jetty-launcher-325-thread-2) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=44216} [junit4] 2> 1152193 ERROR (jetty-launcher-325-thread-2) [ ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete. [junit4] 2> 1152193 INFO (jetty-launcher-325-thread-2) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.0.0 [junit4] 2> 1152193 INFO (jetty-launcher-325-thread-2) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null [junit4] 2> 1152193 INFO (jetty-launcher-325-thread-2) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config dir: null [junit4] 2> 1152193 INFO (jetty-launcher-325-thread-2) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2017-09-08T02:24:54.322Z [junit4] 2> 1152291 INFO (jetty-launcher-325-thread-1) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading... [junit4] 2> 1152346 INFO (jetty-launcher-325-thread-1) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0, but no JMX reporters were configured - adding default JMX reporter. [junit4] 2> 1152419 INFO (jetty-launcher-325-thread-1) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:46742/solr [junit4] 2> 1152469 INFO (jetty-launcher-325-thread-2) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading... [junit4] 2> 1152613 INFO (jetty-launcher-325-thread-2) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0, but no JMX reporters were configured - adding default JMX reporter. 
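Right after this point both nodes register the backup repository the test uses: the solr.xml they load from ZooKeeper defines an "hdfs" repository backed by org.apache.solr.core.backup.repository.HdfsBackupRepository, with location=/backup and solr.hdfs.home pointing at the embedded MiniDFSCluster started above. A sketch of how a SolrCloudTestCase-style setup can wire that together, assuming the repository section is spliced into MiniSolrCloudCluster's default cloud solr.xml; the configset path is a placeholder and this is not the test's literal setup code.

    import java.nio.file.Paths;
    import org.apache.solr.cloud.MiniSolrCloudCluster;
    import org.apache.solr.cloud.SolrCloudTestCase;
    import org.junit.BeforeClass;

    public class HdfsBackupClusterSketch extends SolrCloudTestCase {

      // Repository definition mirroring the BackupRepositoryFactory log entries:
      // an "hdfs" repository rooted at /backup on the test HDFS cluster. The
      // solr.hdfs.home value is the URI printed in the log; a real test would
      // substitute the URI of its own MiniDFSCluster.
      private static final String BACKUP_REPO_XML =
          "  <backup>\n"
          + "    <repository name=\"hdfs\" class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\">\n"
          + "      <str name=\"location\">/backup</str>\n"
          + "      <str name=\"solr.hdfs.home\">hdfs://lucene2-us-west.apache.org:41502/solr</str>\n"
          + "      <str name=\"solr.hdfs.confdir\"></str>\n"
          + "    </repository>\n"
          + "  </backup>\n";

      @BeforeClass
      public static void setupCluster() throws Exception {
        // Splice the repository into the default cloud solr.xml and start two
        // Jetty nodes with a "conf1" configset, matching the two-node cluster
        // being started in the log.
        String solrXml = MiniSolrCloudCluster.DEFAULT_CLOUD_SOLR_XML
            .replace("</solr>", BACKUP_REPO_XML + "</solr>");
        configureCluster(2)
            .addConfig("conf1", Paths.get("path/to/a/minimal/configset/conf")) // placeholder
            .withSolrXml(solrXml)
            .configure();
      }
    }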
[junit4] 2> 1152788 INFO (jetty-launcher-325-thread-2) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:46742/solr [junit4] 2> 1153362 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 1153362 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:44216_solr [junit4] 2> 1153363 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.c.Overseer Overseer (id=98621029983387654-127.0.0.1:44216_solr-n_0000000000) starting [junit4] 2> 1153553 INFO (jetty-launcher-325-thread-1) [n:127.0.0.1:40618_solr ] o.a.s.c.Overseer Overseer (id=null) closing [junit4] 2> 1153613 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:44216_solr [junit4] 2> 1153614 INFO (jetty-launcher-325-thread-1) [n:127.0.0.1:40618_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:40618_solr [junit4] 2> 1153694 INFO (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 1153706 INFO (OverseerStateUpdate-98621029983387654-127.0.0.1:44216_solr-n_0000000000) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1) [junit4] 2> 1153795 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 1153795 INFO (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2) [junit4] 2> 1153912 INFO (jetty-launcher-325-thread-1) [n:127.0.0.1:40618_solr ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:41502/solr,solr.hdfs.confdir=}} [junit4] 2> 1153912 INFO (jetty-launcher-325-thread-1) [n:127.0.0.1:40618_solr ] o.a.s.c.b.r.BackupRepositoryFactory Default configuration for backup repository is with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:41502/solr,solr.hdfs.confdir=}} [junit4] 2> 1154106 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:41502/solr,solr.hdfs.confdir=}} [junit4] 2> 1154120 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.c.b.r.BackupRepositoryFactory Default configuration for backup repository is with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:41502/solr,solr.hdfs.confdir=}} [junit4] 2> 1154138 INFO (jetty-launcher-325-thread-1) [n:127.0.0.1:40618_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1154299 INFO (jetty-launcher-325-thread-1) [n:127.0.0.1:40618_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1154299 INFO (jetty-launcher-325-thread-1) [n:127.0.0.1:40618_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1154300 INFO (jetty-launcher-325-thread-1) [n:127.0.0.1:40618_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node2/. [junit4] 2> 1154432 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1154501 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1154501 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1154531 INFO (jetty-launcher-325-thread-2) [n:127.0.0.1:44216_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node1/. [junit4] 2> 1154873 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(0) -> (2) [junit4] 2> 1154873 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:46742/solr ready [junit4] 2> 1155135 INFO (TEST-TestHdfsCloudBackupRestore.test-seed#[259AE5AA5F7E8D83]) [ ] o.a.s.SolrTestCaseJ4 ###Starting test [junit4] 2> 1155268 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params replicationFactor=1&collection.configName=conf1&version=2&pullReplicas=0&property.customKey=customValue&maxShardsPerNode=2&router.field=shard_s&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=1&action=CREATE&numShards=2&tlogReplicas=1&wt=javabin and sendToOCPQueue=true [junit4] 2> 1155314 INFO (OverseerThreadFactory-1330-thread-1-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.CreateCollectionCmd Create collection hdfsbackuprestore [junit4] 2> 1155430 INFO (OverseerStateUpdate-98621029983387654-127.0.0.1:44216_solr-n_0000000000) [n:127.0.0.1:44216_solr ] o.a.s.c.o.SliceMutator createReplica() { [junit4] 2> "operation":"ADDREPLICA", [junit4] 2> "collection":"hdfsbackuprestore", [junit4] 2> "shard":"shard1", [junit4] 2> "core":"hdfsbackuprestore_shard1_replica_n1", [junit4] 2> "state":"down", [junit4] 2> "base_url":"https://127.0.0.1:44216/solr", [junit4] 2> "type":"NRT"} [junit4] 2> 1155444 INFO (OverseerStateUpdate-98621029983387654-127.0.0.1:44216_solr-n_0000000000) [n:127.0.0.1:44216_solr ] o.a.s.c.o.SliceMutator createReplica() { [junit4] 2> "operation":"ADDREPLICA", [junit4] 2> "collection":"hdfsbackuprestore", [junit4] 2> "shard":"shard1", [junit4] 2> "core":"hdfsbackuprestore_shard1_replica_t1", [junit4] 2> "state":"down", [junit4] 2> "base_url":"https://127.0.0.1:40618/solr", [junit4] 2> "type":"TLOG"} [junit4] 2> 1155445 INFO (OverseerStateUpdate-98621029983387654-127.0.0.1:44216_solr-n_0000000000) [n:127.0.0.1:44216_solr ] o.a.s.c.o.SliceMutator createReplica() { [junit4] 2> "operation":"ADDREPLICA", [junit4] 2> "collection":"hdfsbackuprestore", [junit4] 2> "shard":"shard2", [junit4] 2> "core":"hdfsbackuprestore_shard2_replica_n1", [junit4] 2> "state":"down", [junit4] 2> "base_url":"https://127.0.0.1:44216/solr", [junit4] 2> "type":"NRT"} [junit4] 2> 1155446 INFO (OverseerStateUpdate-98621029983387654-127.0.0.1:44216_solr-n_0000000000) [n:127.0.0.1:44216_solr ] o.a.s.c.o.SliceMutator createReplica() { [junit4] 2> "operation":"ADDREPLICA", [junit4] 2> "collection":"hdfsbackuprestore", [junit4] 2> "shard":"shard2", [junit4] 2> "core":"hdfsbackuprestore_shard2_replica_t1", [junit4] 2> "state":"down", [junit4] 2> "base_url":"https://127.0.0.1:40618/solr", [junit4] 2> "type":"TLOG"} [junit4] 2> 1155936 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node1&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin [junit4] 2> 1155936 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores [junit4] 2> 1155936 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr ] o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard2_replica_n1&action=CREATE&numShards=2&shard=shard2&wt=javabin [junit4] 2> 1156126 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node2&name=hdfsbackuprestore_shard1_replica_t1&action=CREATE&numShards=2&shard=shard1&wt=javabin [junit4] 2> 1156156 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores [junit4] 2> 1156160 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1156221 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node4&name=hdfsbackuprestore_shard2_replica_t1&action=CREATE&numShards=2&shard=shard2&wt=javabin [junit4] 2> 1156330 INFO (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1156331 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1156362 INFO (zkCallback-333-thread-2-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... 
(live nodes size: [2]) [junit4] 2> 1157019 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0 [junit4] 2> 1157214 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0 [junit4] 2> 1157326 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.s.IndexSchema [hdfsbackuprestore_shard2_replica_n1] Schema name=minimal [junit4] 2> 1157329 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id [junit4] 2> 1157329 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_shard2_replica_n1' using configuration from collection hdfsbackuprestore, trusted=true [junit4] 2> 1157333 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema [hdfsbackuprestore_shard1_replica_n1] Schema name=minimal [junit4] 2> 1157378 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_n1' (registry 'solr.core.hdfsbackuprestore.shard2.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1157380 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id [junit4] 2> 1157380 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_shard1_replica_n1' using configuration from collection hdfsbackuprestore, trusted=true [junit4] 2> 1157381 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore.shard1.replica_n1' (registry 'solr.core.hdfsbackuprestore.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1157460 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 1157460 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.SolrCore [[hdfsbackuprestore_shard2_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node1/./hdfsbackuprestore_shard2_replica_n1/data/] [junit4] 2> 1157460 INFO 
(qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 1157461 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore [[hdfsbackuprestore_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node1/hdfsbackuprestore_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node1/./hdfsbackuprestore_shard1_replica_n1/data/] [junit4] 2> 1157473 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0 [junit4] 2> 1157506 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0 [junit4] 2> 1157527 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.s.IndexSchema [hdfsbackuprestore_shard1_replica_t1] Schema name=minimal [junit4] 2> 1157541 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id [junit4] 2> 1157541 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_shard1_replica_t1' using configuration from collection hdfsbackuprestore, trusted=true [junit4] 2> 1157542 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore.shard1.replica_t1' (registry 'solr.core.hdfsbackuprestore.shard1.replica_t1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1157542 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 1157542 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.c.SolrCore [[hdfsbackuprestore_shard1_replica_t1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node2/hdfsbackuprestore_shard1_replica_t1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node2/./hdfsbackuprestore_shard1_replica_t1/data/] [junit4] 2> 1157586 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.s.IndexSchema [hdfsbackuprestore_shard2_replica_t1] Schema name=minimal [junit4] 2> 1157588 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore 
s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id [junit4] 2> 1157588 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_shard2_replica_t1' using configuration from collection hdfsbackuprestore, trusted=true [junit4] 2> 1157588 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_t1' (registry 'solr.core.hdfsbackuprestore.shard2.replica_t1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@3901f6d0 [junit4] 2> 1157588 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder [junit4] 2> 1157588 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.SolrCore [[hdfsbackuprestore_shard2_replica_t1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_t1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_t1/data/] [junit4] 2> 1158296 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog [junit4] 2> 1158296 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 1158297 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 1158297 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 1158345 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@65ea807[hdfsbackuprestore_shard1_replica_n1] main] [junit4] 2> 1158346 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 1158346 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 1158347 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 
10000 [junit4] 2> 1158442 INFO (searcherExecutor-1336-thread-1-processing-n:127.0.0.1:44216_solr x:hdfsbackuprestore_shard1_replica_n1 s:shard1 c:hdfsbackuprestore r:core_node1) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore [hdfsbackuprestore_shard1_replica_n1] Registered new searcher Searcher@65ea807[hdfsbackuprestore_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 1158442 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1577936486998736896 [junit4] 2> 1158456 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog [junit4] 2> 1158456 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 1158471 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 1158471 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 1158487 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog [junit4] 2> 1158487 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 1158488 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 1158488 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 1158569 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.s.SolrIndexSearcher Opening [Searcher@ace6316[hdfsbackuprestore_shard1_replica_t1] main] [junit4] 2> 1158569 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@4a0db3a0[hdfsbackuprestore_shard2_replica_n1] main] [junit4] 2> 1158571 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 1158571 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json 
using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 1158584 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 1158585 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 1158585 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 [junit4] 2> 1158649 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 [junit4] 2> 1158656 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1577936487223132160 [junit4] 2> 1158658 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1577936487225229312 [junit4] 2> 1158762 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Waiting until we see more replicas up for shard shard1: total=2 found=1 timeoutin=9898ms [junit4] 2> 1158782 INFO (searcherExecutor-1335-thread-1-processing-n:127.0.0.1:44216_solr x:hdfsbackuprestore_shard2_replica_n1 s:shard2 c:hdfsbackuprestore r:core_node3) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_n1] Registered new searcher Searcher@4a0db3a0[hdfsbackuprestore_shard2_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 1158782 INFO (searcherExecutor-1337-thread-1-processing-n:127.0.0.1:40618_solr x:hdfsbackuprestore_shard1_replica_t1 s:shard1 c:hdfsbackuprestore r:core_node2) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.c.SolrCore [hdfsbackuprestore_shard1_replica_t1] Registered new searcher Searcher@ace6316[hdfsbackuprestore_shard1_replica_t1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 1158811 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog [junit4] 2> 1158811 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536 [junit4] 2> 1158812 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.CommitTracker Hard AutoCommit: disabled [junit4] 2> 1158825 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) 
[n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1158825 INFO (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1158825 INFO (zkCallback-333-thread-2-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1158840 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.CommitTracker Soft AutoCommit: disabled [junit4] 2> 1158856 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.s.SolrIndexSearcher Opening [Searcher@37cd7cce[hdfsbackuprestore_shard2_replica_t1] main] [junit4] 2> 1158873 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.ShardLeaderElectionContext Waiting until we see more replicas up for shard shard2: total=2 found=1 timeoutin=9999ms [junit4] 2> 1158874 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1 [junit4] 2> 1158874 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1 [junit4] 2> 1158875 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 [junit4] 2> 1158888 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1577936487466401792 [junit4] 2> 1158901 INFO (searcherExecutor-1338-thread-1-processing-n:127.0.0.1:40618_solr x:hdfsbackuprestore_shard2_replica_t1 s:shard2 c:hdfsbackuprestore r:core_node4) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_t1] Registered new searcher Searcher@37cd7cce[hdfsbackuprestore_shard2_replica_t1] main{ExitableDirectoryReader(UninvertingDirectoryReader())} [junit4] 2> 1158976 INFO (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... 
(live nodes size: [2]) [junit4] 2> 1158976 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1158981 INFO (zkCallback-333-thread-2-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1159270 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue. [junit4] 2> 1159270 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync [junit4] 2> 1159270 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:44216/solr/hdfsbackuprestore_shard1_replica_n1/ [junit4] 2> 1159270 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_shard1_replica_n1 url=https://127.0.0.1:44216/solr START replicas=[https://127.0.0.1:40618/solr/hdfsbackuprestore_shard1_replica_t1/] nUpdates=100 [junit4] 2> 1159303 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_shard1_replica_n1 url=https://127.0.0.1:44216/solr DONE. We have no versions. sync failed. [junit4] 2> 1159362 INFO (qtp1452068201-2343) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_t1] webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=1 [junit4] 2> 1159371 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate [junit4] 2> 1159372 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway [junit4] 2> 1159372 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR [junit4] 2> 1159384 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue. 
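The shard leaders being elected here belong to the collection created at 1155268 above: two shards routed on the shard_s field, each with one NRT and one TLOG replica (pullReplicas=0), maxShardsPerNode=2, autoAddReplicas=true, and a custom collection property. An equivalent CREATE issued through SolrJ might look like the sketch below; it assumes the replica-type-aware createCollection overload available in 7.0 and is not the test's literal code.

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateCollectionSketch {
      // Mirrors the logged CREATE params: numShards=2, nrtReplicas=1, tlogReplicas=1,
      // pullReplicas=0, router.field=shard_s, maxShardsPerNode=2, autoAddReplicas=true.
      static void createBackupRestoreCollection(SolrClient client) throws Exception {
        CollectionAdminRequest
            .createCollection("hdfsbackuprestore", "conf1", 2, 1, 1, 0)
            .setRouterField("shard_s")
            .setMaxShardsPerNode(2)
            .setAutoAddReplicas(true)
            .process(client);
        // The logged request also carries property.customKey=customValue, which
        // ends up as a core property on each created replica.
      }
    }

Once both replicas of a shard are up, the NRT replica wins the election and the TLOG replica starts polling it for index updates, which is what the ReplicateFromLeader and IndexFetcher entries further down show.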
[junit4] 2> 1159384 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync [junit4] 2> 1159384 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:44216/solr/hdfsbackuprestore_shard2_replica_n1/ [junit4] 2> 1159384 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_shard2_replica_n1 url=https://127.0.0.1:44216/solr START replicas=[https://127.0.0.1:40618/solr/hdfsbackuprestore_shard2_replica_t1/] nUpdates=100 [junit4] 2> 1159384 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_shard2_replica_n1 url=https://127.0.0.1:44216/solr DONE. We have no versions. sync failed. [junit4] 2> 1159385 INFO (qtp1452068201-2345) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_t1] webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=0 [junit4] 2> 1159387 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate [junit4] 2> 1159387 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway [junit4] 2> 1159387 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR [junit4] 2> 1159401 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:44216/solr/hdfsbackuprestore_shard1_replica_n1/ shard1 [junit4] 2> 1159402 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:44216/solr/hdfsbackuprestore_shard2_replica_n1/ shard2 [junit4] 2> 1159518 INFO (zkCallback-333-thread-2-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1159518 INFO (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... 
(live nodes size: [2]) [junit4] 2> 1159519 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1159583 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary [junit4] 2> 1159583 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary [junit4] 2> 1159584 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node1&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin} status=0 QTime=3648 [junit4] 2> 1159601 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard2_replica_n1&action=CREATE&numShards=2&shard=shard2&wt=javabin} status=0 QTime=3664 [junit4] 2> 1159721 INFO (zkCallback-333-thread-2-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1159721 INFO (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1159722 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... 
(live nodes size: [2]) [junit4] 2> 1159835 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.c.ZkController hdfsbackuprestore_shard1_replica_t1 starting background replication from leader [junit4] 2> 1159835 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.c.ReplicateFromLeader Will start replication from leader with poll interval: 00:00:03 [junit4] 2> 1159850 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.ReplicationHandler Poll scheduled at an interval of 3000ms [junit4] 2> 1159852 INFO (qtp1452068201-2347) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node2&name=hdfsbackuprestore_shard1_replica_t1&action=CREATE&numShards=2&shard=shard1&wt=javabin} status=0 QTime=3725 [junit4] 2> 1159853 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Updated masterUrl to https://127.0.0.1:44216/solr/hdfsbackuprestore_shard1_replica_n1/ [junit4] 2> 1159889 INFO (qtp1259474705-2333) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0 [junit4] 2> 1159889 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Master's generation: 1 [junit4] 2> 1159890 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Master's version: 0 [junit4] 2> 1159890 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Slave's generation: 1 [junit4] 2> 1159890 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Slave's version: 0 [junit4] 2> 1159906 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.ZkController hdfsbackuprestore_shard2_replica_t1 starting background replication from leader [junit4] 2> 1159906 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.ReplicateFromLeader Will start replication from leader with poll interval: 00:00:03 [junit4] 2> 1159919 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.ReplicationHandler Poll scheduled at an interval of 3000ms [junit4] 2> 1159936 INFO (qtp1452068201-2341) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.s.HttpSolrCall [admin] webapp=null 
path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node4&name=hdfsbackuprestore_shard2_replica_t1&action=CREATE&numShards=2&shard=shard2&wt=javabin} status=0 QTime=3715 [junit4] 2> 1159969 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 30 seconds. Check all shard replicas [junit4] 2> 1159970 INFO (OverseerCollectionConfigSetProcessor-98621029983387654-127.0.0.1:44216_solr-n_0000000000) [n:127.0.0.1:44216_solr ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may have disconnected from ZooKeeper [junit4] 2> 1160053 INFO (zkCallback-333-thread-2-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1160065 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2]) [junit4] 2> 1160065 INFO (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... 
(live nodes size: [2]) [junit4] 2> 1160706 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Updated masterUrl to https://127.0.0.1:44216/solr/hdfsbackuprestore_shard2_replica_n1/ [junit4] 2> 1160708 INFO (qtp1259474705-2336) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0 [junit4] 2> 1160722 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Master's generation: 1 [junit4] 2> 1160722 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Master's version: 0 [junit4] 2> 1160722 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Slave's generation: 1 [junit4] 2> 1160722 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Slave's version: 0 [junit4] 2> 1160982 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={replicationFactor=1&collection.configName=conf1&version=2&pullReplicas=0&property.customKey=customValue&maxShardsPerNode=2&router.field=shard_s&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=1&action=CREATE&numShards=2&tlogReplicas=1&wt=javabin} status=0 QTime=5714 [junit4] 2> 1161248 INFO (qtp1452068201-2344) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_t1] webapp=/solr path=/update params={update.distrib=FROMLEADER&distrib.from=https://127.0.0.1:44216/solr/hdfsbackuprestore_shard2_replica_n1/&wt=javabin&version=2}{add=[0 (1577936489679945728), 1 (1577936489700917248), 2 (1577936489700917249), 3 (1577936489700917250), 4 (1577936489700917251), 5 (1577936489700917252), 6 (1577936489700917253), 7 (1577936489700917254), 8 (1577936489700917255), 9 (1577936489700917256), ... (80 adds)]} 0 82 [junit4] 2> 1161258 INFO (qtp1259474705-2339) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/update params={wt=javabin&version=2}{add=[0 (1577936489679945728), 1 (1577936489700917248), 2 (1577936489700917249), 3 (1577936489700917250), 4 (1577936489700917251), 5 (1577936489700917252), 6 (1577936489700917253), 7 (1577936489700917254), 8 (1577936489700917255), 9 (1577936489700917256), ... 
(80 adds)]} 0 258 [junit4] 2> 1161275 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1577936489969352704,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false} [junit4] 2> 1161356 INFO (qtp1452068201-2346) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.u.TestInjection Start waiting for replica in sync with leader [junit4] 2> 1161356 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit. [junit4] 2> 1161357 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 end_commit_flush [junit4] 2> 1161358 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:44216/solr/hdfsbackuprestore_shard2_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 82 [junit4] 2> 1161376 INFO (qtp1259474705-2337) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1577936490075258880,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false} [junit4] 2> 1161376 INFO (qtp1259474705-2337) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@8ebae0 commitCommandVersion:1577936490075258880 [junit4] 2> 1161470 INFO (qtp1452068201-2343) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.TestInjection Start waiting for replica in sync with leader [junit4] 2> 1161709 INFO (qtp1259474705-2333) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=81 [junit4] 2> 1161945 INFO (qtp1259474705-2336) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0 [junit4] 2> 1161945 INFO (qtp1452068201-2346) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard1_replica_t1] webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:44216/solr/hdfsbackuprestore_shard2_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 589 [junit4] 2> 1162343 INFO (qtp1259474705-2337) 
[n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@3c0a11e2[hdfsbackuprestore_shard2_replica_n1] main] [junit4] 2> 1162343 INFO (qtp1259474705-2337) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.DirectUpdateHandler2 end_commit_flush [junit4] 2> 1162351 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0 [junit4] 2> 1162358 INFO (searcherExecutor-1335-thread-1-processing-n:127.0.0.1:44216_solr x:hdfsbackuprestore_shard2_replica_n1 s:shard2 c:hdfsbackuprestore r:core_node3) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_n1] Registered new searcher Searcher@3c0a11e2[hdfsbackuprestore_shard2_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(7.0.0):C80)))} [junit4] 2> 1162359 INFO (qtp1259474705-2337) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:44216/solr/hdfsbackuprestore_shard2_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 982 [junit4] 2> 1162869 INFO (qtp1259474705-2333) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0 [junit4] 2> 1162964 INFO (qtp1259474705-2333) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=55 [junit4] 2> 1162991 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Master's generation: 1 [junit4] 2> 1162991 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Master's version: 0 [junit4] 2> 1162991 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Slave's generation: 1 [junit4] 2> 1162991 INFO (indexFetcher-1356-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard1 r:core_node2 x:hdfsbackuprestore_shard1_replica_t1] o.a.s.h.IndexFetcher Slave's version: 0 [junit4] 2> 1163557 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0 [junit4] 2> 1163705 INFO (qtp1259474705-2339) [n:127.0.0.1:44216_solr 
c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0 [junit4] 2> 1163721 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Master's generation: 2 [junit4] 2> 1163721 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Master's version: 1504837503505 [junit4] 2> 1163721 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Slave's generation: 1 [junit4] 2> 1163721 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Slave's version: 0 [junit4] 2> 1163721 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Starting replication process [junit4] 2> 1163723 INFO (qtp1259474705-2339) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&wt=javabin&version=2&command=filelist} status=0 QTime=1 [junit4] 2> 1163723 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Number of files in latest index in master: 17 [junit4] 2> 1163756 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Starting download (fullCopy=false) to MMapDirectory@/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_t1/data/index.20170908042505853 lockFactory=org.apache.lucene.store.NativeFSLockFactory@322bfd7f [junit4] 2> 1163757 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.si&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1163803 INFO (qtp1259474705-2337) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_Lucene50_0.doc&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1163819 INFO (qtp1259474705-2333) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_Lucene50_0.tim&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1163836 INFO (qtp1259474705-2336) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 
r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_LuceneVarGapFixedInterval_0.tiv&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1163838 INFO (qtp1259474705-2339) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_Lucene50_0.pos&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1163900 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.nvd&checksum=true&wt=filestream&command=filecontent} status=0 QTime=61 [junit4] 2> 1163933 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_LuceneVarGapFixedInterval_0.pos&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1163995 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.fdx&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164046 INFO (qtp1259474705-2337) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_Lucene50_0.tip&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164048 INFO (qtp1259474705-2333) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_LuceneVarGapFixedInterval_0.doc&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164049 INFO (qtp1259474705-2336) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.fdt&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164050 INFO (qtp1259474705-2339) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.dii&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164096 INFO (qtp1259474705-2332) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0 [junit4] 2> 1164108 INFO (qtp1259474705-2336) 
[n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_LuceneVarGapFixedInterval_0.tib&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164157 INFO (qtp1259474705-2336) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.dim&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164206 INFO (qtp1259474705-2336) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.nvm&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164228 INFO (qtp1259474705-2336) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.fnm&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164258 INFO (qtp1259474705-2337) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={generation=2&qt=/replication&file=segments_2&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0 [junit4] 2> 1164259 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.h.IndexFetcher Total time taken for download (fullCopy=false,bytesDownloaded=4640) : 0 secs (null bytes/sec) to MMapDirectory@/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_t1/data/index.20170908042505853 lockFactory=org.apache.lucene.store.NativeFSLockFactory@322bfd7f [junit4] 2> 1164471 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.DefaultSolrCoreState New IndexWriter is ready to be used. 
[junit4] 2> 1164502 INFO (indexFetcher-1358-thread-1) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.s.SolrIndexSearcher Opening [Searcher@242d5436[hdfsbackuprestore_shard2_replica_t1] main] [junit4] 2> 1164503 INFO (searcherExecutor-1338-thread-1-processing-n:127.0.0.1:40618_solr x:hdfsbackuprestore_shard2_replica_t1 s:shard2 c:hdfsbackuprestore r:core_node4) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_t1] Registered new searcher Searcher@242d5436[hdfsbackuprestore_shard2_replica_t1] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(7.0.0):C80)))} [junit4] 2> 1164727 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0 [junit4] 2> 1164727 INFO (qtp1452068201-2343) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.TestInjection Waiting time for tlog replica to be in sync with leader: 3242 [junit4] 2> 1164728 INFO (qtp1452068201-2343) [n:127.0.0.1:40618_solr c:hdfsbackuprestore s:shard2 r:core_node4 x:hdfsbackuprestore_shard2_replica_t1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_t1] webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:44216/solr/hdfsbackuprestore_shard2_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 3257 [junit4] 2> 1164728 INFO (qtp1259474705-2335) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/update params={_stateVer_=hdfsbackuprestore:8&waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 3469 [junit4] 2> 1164790 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n1] webapp=/solr path=/select params={q=*:*&distrib=false&wt=javabin&version=2} hits=80 status=0 QTime=0 [junit4] 2> 1164793 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/select params={q=*:*&distrib=false&wt=javabin&version=2} hits=0 status=0 QTime=0 [junit4] 2> 1164793 INFO (TEST-TestHdfsCloudBackupRestore.test-seed#[259AE5AA5F7E8D83]) [ ] o.a.s.c.AbstractCloudBackupRestoreTestCase Triggering Backup command [junit4] 2> 1164806 INFO (qtp1259474705-2338) [n:127.0.0.1:44216_solr ] o.a.s.h.a.Collec [...truncated too long message...] 
reporters for registry=solr.core.hdfsbackuprestore.shard2.replica_n1, tag=135412327 [junit4] 2> 1189478 INFO (coreCloseExecutor-1371-thread-2) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@2f724d2c: rootName = null, domain = solr.core.hdfsbackuprestore.shard2.replica_n1, service url = null, agent id = null] for registry solr.core.hdfsbackuprestore.shard2.replica_n1 / com.codahale.metrics.MetricRegistry@660812ad [junit4] 2> 1189482 INFO (coreCloseExecutor-1371-thread-1) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore [hdfsbackuprestore_shard1_replica_n1] CLOSING SolrCore org.apache.solr.core.SolrCore@6b216ebe [junit4] 2> 1189497 INFO (coreCloseExecutor-1371-thread-2) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard2 r:core_node3 x:hdfsbackuprestore_shard2_replica_n1] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.collection.hdfsbackuprestore.shard2.leader, tag=135412327 [junit4] 2> 1189502 INFO (coreCloseExecutor-1371-thread-1) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.hdfsbackuprestore.shard1.replica_n1, tag=1797353150 [junit4] 2> 1189502 INFO (coreCloseExecutor-1371-thread-1) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@48ee8af3: rootName = null, domain = solr.core.hdfsbackuprestore.shard1.replica_n1, service url = null, agent id = null] for registry solr.core.hdfsbackuprestore.shard1.replica_n1 / com.codahale.metrics.MetricRegistry@1a3962f3 [junit4] 2> 1189543 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... 
(live nodes size: [2]) [junit4] 2> 1189646 INFO (coreCloseExecutor-1371-thread-3) [n:127.0.0.1:44216_solr c:hdfsbackuprestore_restored s:shard2 r:core_node1 x:hdfsbackuprestore_restored_shard2_replica_n0] o.a.s.c.SolrCore [hdfsbackuprestore_restored_shard2_replica_n0] CLOSING SolrCore org.apache.solr.core.SolrCore@7ccbd8ca [junit4] 2> 1189646 INFO (coreCloseExecutor-1371-thread-4) [n:127.0.0.1:44216_solr c:hdfsbackuprestore_restored s:shard1 r:core_node2 x:hdfsbackuprestore_restored_shard1_replica_n0] o.a.s.c.SolrCore [hdfsbackuprestore_restored_shard1_replica_n0] CLOSING SolrCore org.apache.solr.core.SolrCore@4ffc4661 [junit4] 2> 1189647 INFO (coreCloseExecutor-1371-thread-1) [n:127.0.0.1:44216_solr c:hdfsbackuprestore s:shard1 r:core_node1 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.collection.hdfsbackuprestore.shard1.leader, tag=1797353150 [junit4] 2> 1189648 INFO (coreCloseExecutor-1371-thread-3) [n:127.0.0.1:44216_solr c:hdfsbackuprestore_restored s:shard2 r:core_node1 x:hdfsbackuprestore_restored_shard2_replica_n0] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.hdfsbackuprestore_restored.shard2.replica_n0, tag=2093734090 [junit4] 2> 1189693 INFO (coreCloseExecutor-1371-thread-3) [n:127.0.0.1:44216_solr c:hdfsbackuprestore_restored s:shard2 r:core_node1 x:hdfsbackuprestore_restored_shard2_replica_n0] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@694ed00f: rootName = null, domain = solr.core.hdfsbackuprestore_restored.shard2.replica_n0, service url = null, agent id = null] for registry solr.core.hdfsbackuprestore_restored.shard2.replica_n0 / com.codahale.metrics.MetricRegistry@2ed54b04 [junit4] 2> 1189758 INFO (coreCloseExecutor-1371-thread-3) [n:127.0.0.1:44216_solr c:hdfsbackuprestore_restored s:shard2 r:core_node1 x:hdfsbackuprestore_restored_shard2_replica_n0] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.collection.hdfsbackuprestore_restored.shard2.leader, tag=2093734090 [junit4] 2> 1189758 INFO (coreCloseExecutor-1371-thread-4) [n:127.0.0.1:44216_solr c:hdfsbackuprestore_restored s:shard1 r:core_node2 x:hdfsbackuprestore_restored_shard1_replica_n0] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.hdfsbackuprestore_restored.shard1.replica_n0, tag=1341933153 [junit4] 2> 1189759 INFO (coreCloseExecutor-1371-thread-4) [n:127.0.0.1:44216_solr c:hdfsbackuprestore_restored s:shard1 r:core_node2 x:hdfsbackuprestore_restored_shard1_replica_n0] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@590dc384: rootName = null, domain = solr.core.hdfsbackuprestore_restored.shard1.replica_n0, service url = null, agent id = null] for registry solr.core.hdfsbackuprestore_restored.shard1.replica_n0 / com.codahale.metrics.MetricRegistry@6dca9ebe [junit4] 2> 1189793 INFO (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... 
(2) -> (1) [junit4] 2> 1189809 INFO (coreCloseExecutor-1371-thread-4) [n:127.0.0.1:44216_solr c:hdfsbackuprestore_restored s:shard1 r:core_node2 x:hdfsbackuprestore_restored_shard1_replica_n0] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.collection.hdfsbackuprestore_restored.shard1.leader, tag=1341933153 [junit4] 2> 1190022 INFO (jetty-closer-326-thread-2) [ ] o.a.s.c.Overseer Overseer (id=98621029983387654-127.0.0.1:44216_solr-n_0000000000) closing [junit4] 2> 1190025 INFO (OverseerStateUpdate-98621029983387654-127.0.0.1:44216_solr-n_0000000000) [n:127.0.0.1:44216_solr ] o.a.s.c.Overseer Overseer Loop exiting : 127.0.0.1:44216_solr [junit4] 2> 1190088 WARN (zkCallback-338-thread-2-processing-n:127.0.0.1:44216_solr) [n:127.0.0.1:44216_solr ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered, but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes] [junit4] 2> 1190088 INFO (jetty-closer-326-thread-2) [ ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@aeb2a13{/solr,null,UNAVAILABLE} [junit4] 2> 1191302 WARN (zkCallback-333-thread-1-processing-n:127.0.0.1:40618_solr) [n:127.0.0.1:40618_solr ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered, but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes] [junit4] 2> 1191302 INFO (jetty-closer-326-thread-1) [ ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@5c312b53{/solr,null,UNAVAILABLE} [junit4] 2> 1191306 ERROR (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes [junit4] 2> 1191306 INFO (SUITE-TestHdfsCloudBackupRestore-seed#[259AE5AA5F7E8D83]-worker) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:46742 46742 [junit4] 2> 1191384 INFO (Thread-486) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:46742 46742 [junit4] 2> 1191385 WARN (Thread-486) [ ] o.a.s.c.ZkTestServer Watch limit violations: [junit4] 2> Maximum concurrent create/delete watches above limit: [junit4] 2> [junit4] 2> 3 /solr/aliases.json [junit4] 2> 2 /solr/security.json [junit4] 2> 2 /solr/collections/hdfsbackuprestore_restored/state.json [junit4] 2> 2 /solr/configs/conf1 [junit4] 2> [junit4] 2> Maximum concurrent data watches above limit: [junit4] 2> [junit4] 2> 11 /solr/collections/hdfsbackuprestore/state.json [junit4] 2> 3 /solr/clusterstate.json [junit4] 2> 3 /solr/clusterprops.json [junit4] 2> [junit4] 2> Maximum concurrent children watches above limit: [junit4] 2> [junit4] 2> 3 /solr/live_nodes [junit4] 2> 3 /solr/collections [junit4] 2> [junit4] 2> NOTE: leaving temporary files on disk at: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.0/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_259AE5AA5F7E8D83-001 [junit4] 2> Sep 08, 2017 2:25:33 AM com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks [junit4] 2> WARNING: Will linger awaiting termination of 3 leaked thread(s). 
[junit4] 2> NOTE: test params are: codec=Asserting(Lucene70): {shard_s=Lucene50(blocksize=128), id=PostingsFormat(name=LuceneVarGapFixedInterval)}, docValues:{}, maxPointsInLeafNode=1895, maxMBSortInHeap=6.084574758418689, sim=RandomSimilarity(queryNorm=false): {}, locale=no-NO, timezone=Europe/Luxembourg [junit4] 2> NOTE: Linux 4.4.0-83-generic amd64/Oracle Corporation 1.8.0_144 (64-bit)/cpus=4,threads=6,free=193988048,total=410517504 [junit4] 2> NOTE: All tests run in this JVM: [ReturnFieldsTest, BadIndexSchemaTest, DistributedQueueTest, CloneFieldUpdateProcessorFactoryTest, TestRecovery, TestTrie, InfixSuggestersTest, MetricsHandlerTest, BasicDistributedZk2Test, ReplaceNodeTest, CoreAdminRequestStatusTest, CollectionsAPISolrJTest, SimpleFacetsTest, TestObjectReleaseTracker, UniqFieldsUpdateProcessorFactoryTest, TestTestInjection, SolrMetricManagerTest, TestCopyFieldCollectionResource, TestUseDocValuesAsStored, TestExceedMaxTermLength, AnalysisErrorHandlingTest, CircularListTest, AnalyticsQueryTest, AliasIntegrationTest, DateFieldTest, TestStressInPlaceUpdates, UninvertDocValuesMergePolicyTest, TestFieldCollectionResource, TestLegacyTerms, TestHdfsCloudBackupRestore] [junit4] Completed [106/729 (1!)] on J2 in 81.21s, 1 test, 1 error <<< FAILURES! [...truncated 45471 lines...]
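For anyone trying to reproduce this failure locally, the randomized test parameters reported above (seed 259AE5AA5F7E8D83, locale no-NO, timezone Europe/Luxembourg) can be fed back to the test runner. The build's own reproduce line fell inside the truncated portion of the log, so the following is only a sketch of the usual invocation, assuming the standard ant test target under solr/core and the conventional randomizedtesting system properties:

    # sketch only: flags follow the usual Lucene/Solr randomizedtesting convention, not copied from this build's output
    ant test -Dtestcase=TestHdfsCloudBackupRestore -Dtests.method=test -Dtests.seed=259AE5AA5F7E8D83 -Dtests.locale=no-NO -Dtests.timezone=Europe/Luxembourg

Failures of this kind can be timing dependent, so the same seed does not guarantee a deterministic reproduction.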
