Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/20679/
Java: 32bit/jdk1.8.0_144 -client -XX:+UseSerialGC
4 tests failed.
FAILED: org.apache.solr.cloud.DistributedVersionInfoTest.testReplicaVersionHandling
Error Message:
Stack Trace:
java.util.concurrent.TimeoutException
    at __randomizedtesting.SeedInfo.seed([580AD52F77FA400A:84F302D5D5818A4B]:0)
    at org.apache.solr.common.cloud.ZkStateReader.waitForState(ZkStateReader.java:1323)
    at org.apache.solr.cloud.DistributedVersionInfoTest.testReplicaVersionHandling(DistributedVersionInfoTest.java:86)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at java.lang.Thread.run(Thread.java:748)
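
For context on this failure: the test timed out inside ZkStateReader.waitForState, which blocks until a CollectionStatePredicate matches the collection's state or the wait expires. Below is a minimal sketch of that wait pattern, not the test's actual code; the timeout and the "every replica active on a live node" predicate are illustrative assumptions.

    // Minimal sketch of the SolrJ ZkStateReader.waitForState(...) pattern that timed out above.
    // Not the test's code; timeout value and predicate are illustrative.
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.ZkStateReader;

    public class WaitForActiveReplicasSketch {
      // Blocks until every replica of the collection is ACTIVE on a live node,
      // or throws java.util.concurrent.TimeoutException once the wait expires.
      public static void waitUntilActive(CloudSolrClient client, String collection)
          throws InterruptedException, TimeoutException {
        ZkStateReader reader = client.getZkStateReader();
        reader.waitForState(collection, 60, TimeUnit.SECONDS, (liveNodes, coll) ->
            coll != null && coll.getReplicas().stream().allMatch(r ->
                r.getState() == Replica.State.ACTIVE && liveNodes.contains(r.getNodeName())));
      }
    }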
FAILED: org.apache.solr.cloud.ShardSplitTest.testSplitWithChaosMonkey
Error Message:
There are still nodes recoverying - waited for 330 seconds
Stack Trace:
java.lang.AssertionError: There are still nodes recoverying - waited for 330 seconds
    at __randomizedtesting.SeedInfo.seed([580AD52F77FA400A:D32D06FE36FCEB8E]:0)
    at org.junit.Assert.fail(Assert.java:93)
    at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:185)
    at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:140)
    at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:135)
    at org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(AbstractFullDistribZkTestBase.java:908)
    at org.apache.solr.cloud.ShardSplitTest.testSplitWithChaosMonkey(ShardSplitTest.java:436)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
    at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
    at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at java.lang.Thread.run(Thread.java:748)
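
This failure comes from AbstractDistribZkTestBase.waitForRecoveriesToFinish giving up after 330 seconds with replicas still in recovery. The sketch below shows the general shape of such a wait using the public SolrJ cluster-state API; it is an illustration under stated assumptions, not the test framework's implementation, and the timeout and polling interval are arbitrary.

    import java.util.concurrent.TimeUnit;

    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.Slice;
    import org.apache.solr.common.cloud.ZkStateReader;

    public class WaitForRecoveriesSketch {
      // Polls the cluster state until no replica hosted on a live node is still
      // RECOVERING or DOWN, or fails once the timeout expires.
      public static void waitForRecoveries(ZkStateReader reader, String collection, int timeoutSeconds)
          throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
        while (true) {
          ClusterState state = reader.getClusterState();
          DocCollection coll = state.getCollectionOrNull(collection);
          boolean stillRecovering = false;
          if (coll != null) {
            for (Slice slice : coll.getSlices()) {
              for (Replica replica : slice.getReplicas()) {
                boolean onLiveNode = state.getLiveNodes().contains(replica.getNodeName());
                Replica.State s = replica.getState();
                if (onLiveNode && (s == Replica.State.RECOVERING || s == Replica.State.DOWN)) {
                  stillRecovering = true;
                }
              }
            }
          }
          if (!stillRecovering) {
            return;               // every replica on a live node is out of recovery
          }
          if (System.nanoTime() > deadline) {
            throw new AssertionError("replicas still recovering after " + timeoutSeconds + " seconds");
          }
          Thread.sleep(1000);     // poll interval, arbitrary
        }
      }
    }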
FAILED: org.apache.solr.cloud.TestHdfsCloudBackupRestore.test
Error Message:
expected:<COMPLETED> but was:<RUNNING>
Stack Trace:
java.lang.AssertionError: expected:<COMPLETED> but was:<RUNNING>
    at __randomizedtesting.SeedInfo.seed([580AD52F77FA400A:D05EEAF5D9062DF2]:0)
    at org.junit.Assert.fail(Assert.java:93)
    at org.junit.Assert.failNotEquals(Assert.java:647)
    at org.junit.Assert.assertEquals(Assert.java:128)
    at org.junit.Assert.assertEquals(Assert.java:147)
    at org.apache.solr.cloud.AbstractCloudBackupRestoreTestCase.testBackupAndRestore(AbstractCloudBackupRestoreTestCase.java:277)
    at org.apache.solr.cloud.AbstractCloudBackupRestoreTestCase.test(AbstractCloudBackupRestoreTestCase.java:136)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at java.lang.Thread.run(Thread.java:748)
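
The backup/restore failure is an assertion on the status of an asynchronous Collections API request: the test expected RequestStatusState.COMPLETED but the operation was still RUNNING. A minimal sketch of that submit-and-poll pattern with SolrJ follows; it is not the test's code, the collection, backup name, location, and repository name are taken loosely from the log below and are illustrative, and the setRepositoryName/processAndWait calls are assumed to match the SolrJ API of this build.

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;

    public class AsyncBackupStatusSketch {
      // Submits an async BACKUP and polls REQUESTSTATUS until a terminal state or timeout.
      public static void backupAndWait(SolrClient client) throws Exception {
        RequestStatusState state = CollectionAdminRequest
            .backupCollection("hdfsbackuprestore", "my_backup")  // names are illustrative
            .setLocation("/backup")                              // location as configured in the log below
            .setRepositoryName("hdfs")                           // HDFS repository from solr.xml (assumed setter)
            .processAndWait(client, 120);                        // timeout in seconds, arbitrary
        if (state != RequestStatusState.COMPLETED) {
          throw new AssertionError("expected:<COMPLETED> but was:<" + state + ">");
        }
      }
    }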
FAILED: org.apache.solr.client.solrj.impl.CloudSolrClientTest.testHandlingOfStaleAlias
Error Message:
Fetching cluster properties not supported using the HttpClusterStateProvider. ZkClientClusterStateProvider can be used for this.
Stack Trace:
java.lang.UnsupportedOperationException: Fetching cluster properties not supported using the HttpClusterStateProvider. ZkClientClusterStateProvider can be used for this.
    at __randomizedtesting.SeedInfo.seed([9E0A7DC4920EDD85:8E773A0D67EDDD12]:0)
    at org.apache.solr.client.solrj.impl.HttpClusterStateProvider.getClusterProperties(HttpClusterStateProvider.java:254)
    at org.apache.solr.client.solrj.impl.ClusterStateProvider.getClusterProperty(ClusterStateProvider.java:65)
    at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1019)
    at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:862)
    at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:793)
    at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:178)
    at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:195)
    at org.apache.solr.client.solrj.impl.CloudSolrClientTest.testHandlingOfStaleAlias(CloudSolrClientTest.java:226)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
    at org.junit.rules.ExpectedException$ExpectedExceptionStatement.evaluate(ExpectedException.java:110)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at java.lang.Thread.run(Thread.java:748)
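
The exception here points at the distinction between the two ClusterStateProvider implementations: a CloudSolrClient built from Solr URLs reads cluster state over HTTP (HttpClusterStateProvider) and cannot fetch cluster properties, while one built from a ZooKeeper address uses ZkClientClusterStateProvider and can. The sketch below is illustrative only; the addresses are hypothetical and the builder calls are assumed to match the SolrJ API at the time of this build.

    import org.apache.solr.client.solrj.impl.CloudSolrClient;

    public class ClusterStateProviderSketch {
      public static void main(String[] args) throws Exception {
        // URL-based client: cluster state is read over HTTP (HttpClusterStateProvider);
        // operations that need cluster properties throw UnsupportedOperationException,
        // as in the trace above.
        try (CloudSolrClient httpBased = new CloudSolrClient.Builder()
            .withSolrUrl("https://127.0.0.1:8983/solr")   // hypothetical node address
            .build()) {
          // requests that consult cluster properties can fail on this client
        }

        // ZooKeeper-based client: cluster state and cluster properties come from ZK
        // (ZkClientClusterStateProvider), so the same code path works.
        try (CloudSolrClient zkBased = new CloudSolrClient.Builder()
            .withZkHost("127.0.0.1:2181")                 // hypothetical ZK address
            .build()) {
          zkBased.connect();
        }
      }
    }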
Build Log:
[...truncated 13323 lines...]
[junit4] Suite: org.apache.solr.cloud.TestHdfsCloudBackupRestore
[junit4] 2> Creating dataDir:
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/init-core-data-001
[junit4] 2> 2987274 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=12 numCloses=12
[junit4] 2> 2987275 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.SolrTestCaseJ4 Using TrieFields (NUMERIC_POINTS_SYSPROP=false)
w/NUMERIC_DOCVALUES_SYSPROP=false
[junit4] 2> 2987276 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (true) via:
@org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
[junit4] 2> 2987277 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks:
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 1> Formatting using clusterid: testClusterID
[junit4] 2> 2987367 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.h.m.i.MetricsConfig Cannot locate configuration: tried
hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
[junit4] 2> 2992812 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 2992816 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 2992836 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log Extract
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs
to ./temp/Jetty_localhost_localdomain_38561_hdfs____c5y8zp/webapp
[junit4] 2> 2993301 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost.localdomain:38561
[junit4] 2> 2993455 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 2993456 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 2993464 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log Extract
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
to ./temp/Jetty_localhost_34865_datanode____azerbx/webapp
[junit4] 2> 2993909 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:34865
[junit4] 2> 2993956 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 2993957 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 2993966 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log Extract
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
to ./temp/Jetty_localhost_38805_datanode____rdjxr3/webapp
[junit4] 2> 2994036 ERROR (DataNode:
[[[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-001/hdfsBaseDir/data/data1/,
[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-001/hdfsBaseDir/data/data2/]]
heartbeating to localhost.localdomain/127.0.0.1:35301) [ ]
o.a.h.h.s.d.DirectoryScanner
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1
ms/sec. Assuming default value of 1000
[junit4] 2> 2994043 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0xe3a3ebe75a357: from storage
DS-f50663a9-bf96-42e6-8049-a9eebf5ca9dd node
DatanodeRegistration(127.0.0.1:41065,
datanodeUuid=f952dab0-2576-479d-a837-fbc898026afe, infoPort=37185,
infoSecurePort=0, ipcPort=39491,
storageInfo=lv=-56;cid=testClusterID;nsid=644726944;c=0), blocks: 0,
hasStaleStorage: true, processing time: 0 msecs
[junit4] 2> 2994043 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0xe3a3ebe75a357: from storage
DS-518826db-1bab-4f75-b504-ba8632c03977 node
DatanodeRegistration(127.0.0.1:41065,
datanodeUuid=f952dab0-2576-479d-a837-fbc898026afe, infoPort=37185,
infoSecurePort=0, ipcPort=39491,
storageInfo=lv=-56;cid=testClusterID;nsid=644726944;c=0), blocks: 0,
hasStaleStorage: false, processing time: 0 msecs
[junit4] 2> 2994436 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:38805
[junit4] 2> 2994588 ERROR (DataNode:
[[[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-001/hdfsBaseDir/data/data3/,
[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-001/hdfsBaseDir/data/data4/]]
heartbeating to localhost.localdomain/127.0.0.1:35301) [ ]
o.a.h.h.s.d.DirectoryScanner
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1
ms/sec. Assuming default value of 1000
[junit4] 2> 2994596 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0xe3a3edf640c17: from storage
DS-746b0129-790c-408d-b0cd-c46c31000855 node
DatanodeRegistration(127.0.0.1:36249,
datanodeUuid=2e5c9b9e-1733-47dd-a778-1abdc1a87b7a, infoPort=36645,
infoSecurePort=0, ipcPort=46171,
storageInfo=lv=-56;cid=testClusterID;nsid=644726944;c=0), blocks: 0,
hasStaleStorage: true, processing time: 0 msecs
[junit4] 2> 2994596 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0xe3a3edf640c17: from storage
DS-4874c137-7b02-4869-a037-23b78b117ee4 node
DatanodeRegistration(127.0.0.1:36249,
datanodeUuid=2e5c9b9e-1733-47dd-a778-1abdc1a87b7a, infoPort=36645,
infoSecurePort=0, ipcPort=46171,
storageInfo=lv=-56;cid=testClusterID;nsid=644726944;c=0), blocks: 0,
hasStaleStorage: false, processing time: 0 msecs
[junit4] 2> 2994716 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002
[junit4] 2> 2994716 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 2994717 INFO (Thread-7639) [ ] o.a.s.c.ZkTestServer client
port:0.0.0.0/0.0.0.0:0
[junit4] 2> 2994717 INFO (Thread-7639) [ ] o.a.s.c.ZkTestServer
Starting server
[junit4] 2> 2994720 ERROR (Thread-7639) [ ] o.a.z.s.ZooKeeperServer
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action
on ERROR or SHUTDOWN server state changes
[junit4] 2> 2994817 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.c.ZkTestServer start zk server on port:42417
[junit4] 2> 2994831 INFO (jetty-launcher-4087-thread-1) [ ]
o.e.j.s.Server jetty-9.3.20.v20170531
[junit4] 2> 2994831 INFO (jetty-launcher-4087-thread-2) [ ]
o.e.j.s.Server jetty-9.3.20.v20170531
[junit4] 2> 2994833 INFO (jetty-launcher-4087-thread-1) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@7b47d6{/solr,null,AVAILABLE}
[junit4] 2> 2994834 INFO (jetty-launcher-4087-thread-2) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@14fe3e{/solr,null,AVAILABLE}
[junit4] 2> 2994835 INFO (jetty-launcher-4087-thread-1) [ ]
o.e.j.s.AbstractConnector Started ServerConnector@192ea79{SSL,[ssl,
http/1.1]}{127.0.0.1:46489}
[junit4] 2> 2994835 INFO (jetty-launcher-4087-thread-1) [ ]
o.e.j.s.Server Started @2995961ms
[junit4] 2> 2994835 INFO (jetty-launcher-4087-thread-1) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr,
hostPort=46489}
[junit4] 2> 2994835 INFO (jetty-launcher-4087-thread-2) [ ]
o.e.j.s.AbstractConnector Started ServerConnector@1b4f69c{SSL,[ssl,
http/1.1]}{127.0.0.1:41659}
[junit4] 2> 2994836 INFO (jetty-launcher-4087-thread-2) [ ]
o.e.j.s.Server Started @2995961ms
[junit4] 2> 2994836 INFO (jetty-launcher-4087-thread-2) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr,
hostPort=41659}
[junit4] 2> 2994836 ERROR (jetty-launcher-4087-thread-1) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 2994837 INFO (jetty-launcher-4087-thread-1) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
8.0.0
[junit4] 2> 2994837 ERROR (jetty-launcher-4087-thread-2) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 2994837 INFO (jetty-launcher-4087-thread-1) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 2994837 INFO (jetty-launcher-4087-thread-2) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
8.0.0
[junit4] 2> 2994837 INFO (jetty-launcher-4087-thread-1) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config
dir: null
[junit4] 2> 2994837 INFO (jetty-launcher-4087-thread-2) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 2994837 INFO (jetty-launcher-4087-thread-1) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-10-15T16:44:02.309Z
[junit4] 2> 2994837 INFO (jetty-launcher-4087-thread-2) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config
dir: null
[junit4] 2> 2994838 INFO (jetty-launcher-4087-thread-2) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-10-15T16:44:02.310Z
[junit4] 2> 2994841 INFO (jetty-launcher-4087-thread-1) [ ]
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
[junit4] 2> 2994841 INFO (jetty-launcher-4087-thread-2) [ ]
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
[junit4] 2> 2994857 INFO (jetty-launcher-4087-thread-2) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 2994859 INFO (jetty-launcher-4087-thread-1) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 2994862 INFO (jetty-launcher-4087-thread-2) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42417/solr
[junit4] 2> 2994862 INFO (jetty-launcher-4087-thread-1) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42417/solr
[junit4] 2> 2994864 WARN (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [ ]
o.a.z.s.NIOServerCnxn caught end of stream exception
[junit4] 2> EndOfStreamException: Unable to read additional data from
client sessionid 0x15f20ebc10e0004, likely client has closed socket
[junit4] 2> at
org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:239)
[junit4] 2> at
org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:203)
[junit4] 2> at java.lang.Thread.run(Thread.java:748)
[junit4] 2> 2994920 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 2994920 INFO (jetty-launcher-4087-thread-1)
[n:127.0.0.1:46489_solr ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 2994920 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.c.OverseerElectionContext I am going to be
the leader 127.0.0.1:41659_solr
[junit4] 2> 2994920 INFO (jetty-launcher-4087-thread-1)
[n:127.0.0.1:46489_solr ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:46489_solr
[junit4] 2> 2994920 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.c.Overseer Overseer
(id=98833913753763845-127.0.0.1:41659_solr-n_0000000000) starting
[junit4] 2> 2994921 INFO
(zkCallback-4098-thread-1-processing-n:127.0.0.1:41659_solr)
[n:127.0.0.1:41659_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 2994921 INFO
(zkCallback-4099-thread-1-processing-n:127.0.0.1:46489_solr)
[n:127.0.0.1:46489_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 2994924 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:41659_solr
[junit4] 2> 2994925 INFO
(zkCallback-4098-thread-1-processing-n:127.0.0.1:41659_solr)
[n:127.0.0.1:41659_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (1) -> (2)
[junit4] 2> 2994925 INFO
(zkCallback-4099-thread-1-processing-n:127.0.0.1:46489_solr)
[n:127.0.0.1:46489_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (1) -> (2)
[junit4] 2> 2995004 INFO (jetty-launcher-4087-thread-1)
[n:127.0.0.1:46489_solr ] o.a.s.c.b.r.BackupRepositoryFactory Added backup
repository with configuration params {type = repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:35301/solr,solr.hdfs.confdir=}}
[junit4] 2> 2995004 INFO (jetty-launcher-4087-thread-1)
[n:127.0.0.1:46489_solr ] o.a.s.c.b.r.BackupRepositoryFactory Default
configuration for backup repository is with configuration params {type =
repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:35301/solr,solr.hdfs.confdir=}}
[junit4] 2> 2995031 INFO (jetty-launcher-4087-thread-1)
[n:127.0.0.1:46489_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2995041 INFO (jetty-launcher-4087-thread-1)
[n:127.0.0.1:46489_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2995041 INFO (jetty-launcher-4087-thread-1)
[n:127.0.0.1:46489_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2995042 INFO (jetty-launcher-4087-thread-1)
[n:127.0.0.1:46489_solr ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/.
[junit4] 2> 2995118 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.c.b.r.BackupRepositoryFactory Added backup
repository with configuration params {type = repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:35301/solr,solr.hdfs.confdir=}}
[junit4] 2> 2995118 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.c.b.r.BackupRepositoryFactory Default
configuration for backup repository is with configuration params {type =
repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:35301/solr,solr.hdfs.confdir=}}
[junit4] 2> 2995143 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2995154 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2995154 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2995155 INFO (jetty-launcher-4087-thread-2)
[n:127.0.0.1:41659_solr ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/.
[junit4] 2> 2995182 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 2995182 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[580AD52F77FA400A]-worker) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42417/solr ready
[junit4] 2> 2995207 INFO
(TEST-TestHdfsCloudBackupRestore.test-seed#[580AD52F77FA400A]) [ ]
o.a.s.SolrTestCaseJ4 ###Starting test
[junit4] 2> 2995269 INFO (qtp933797-27167) [n:127.0.0.1:41659_solr ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
replicationFactor=2&collection.configName=conf1&router.name=implicit&version=2&pullReplicas=1&shards=shard1,shard2&property.customKey=customValue&maxShardsPerNode=4&router.field=shard_s&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=2&action=CREATE&tlogReplicas=1&wt=javabin
and sendToOCPQueue=true
[junit4] 2> 2995271 INFO
(OverseerThreadFactory-9366-thread-1-processing-n:127.0.0.1:41659_solr)
[n:127.0.0.1:41659_solr ] o.a.s.c.CreateCollectionCmd Create collection
hdfsbackuprestore
[junit4] 2> 2995272 WARN
(OverseerThreadFactory-9366-thread-1-processing-n:127.0.0.1:41659_solr)
[n:127.0.0.1:41659_solr ] o.a.s.c.CreateCollectionCmd Specified number of
replicas of 4 on collection hdfsbackuprestore is higher than the number of Solr
instances currently live or live and part of your createNodeSet(2). It's
unusual to run two replica of the same slice on the same Solr-instance.
[junit4] 2> 2995381 INFO
(OverseerStateUpdate-98833913753763845-127.0.0.1:41659_solr-n_0000000000)
[n:127.0.0.1:41659_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_n1",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:46489/solr",
[junit4] 2> "type":"NRT"}
[junit4] 2> 2995383 INFO
(OverseerStateUpdate-98833913753763845-127.0.0.1:41659_solr-n_0000000000)
[n:127.0.0.1:41659_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_n2",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:41659/solr",
[junit4] 2> "type":"NRT"}
[junit4] 2> 2995386 INFO
(OverseerStateUpdate-98833913753763845-127.0.0.1:41659_solr-n_0000000000)
[n:127.0.0.1:41659_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_t4",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:46489/solr",
[junit4] 2> "type":"TLOG"}
[junit4] 2> 2995388 INFO
(OverseerStateUpdate-98833913753763845-127.0.0.1:41659_solr-n_0000000000)
[n:127.0.0.1:41659_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_p6",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:41659/solr",
[junit4] 2> "type":"PULL"}
[junit4] 2> 2995391 INFO
(OverseerStateUpdate-98833913753763845-127.0.0.1:41659_solr-n_0000000000)
[n:127.0.0.1:41659_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_n8",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:46489/solr",
[junit4] 2> "type":"NRT"}
[junit4] 2> 2995393 INFO
(OverseerStateUpdate-98833913753763845-127.0.0.1:41659_solr-n_0000000000)
[n:127.0.0.1:41659_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_n10",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:41659/solr",
[junit4] 2> "type":"NRT"}
[junit4] 2> 2995396 INFO
(OverseerStateUpdate-98833913753763845-127.0.0.1:41659_solr-n_0000000000)
[n:127.0.0.1:41659_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_t12",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:46489/solr",
[junit4] 2> "type":"TLOG"}
[junit4] 2> 2995398 INFO
(OverseerStateUpdate-98833913753763845-127.0.0.1:41659_solr-n_0000000000)
[n:127.0.0.1:41659_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_p14",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:41659/solr",
[junit4] 2> "type":"PULL"}
[junit4] 2> 2995620 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_shard1_replica_n2&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 2995620 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=PULL&property.customKey=customValue&coreNodeName=core_node9&name=hdfsbackuprestore_shard1_replica_p6&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 2995620 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node13&name=hdfsbackuprestore_shard2_replica_n10&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 2995621 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647
transient cores
[junit4] 2> 2995623 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=PULL&property.customKey=customValue&coreNodeName=core_node16&name=hdfsbackuprestore_shard2_replica_p14&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 2995700 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node11&name=hdfsbackuprestore_shard2_replica_n8&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 2995700 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647
transient cores
[junit4] 2> 2995718 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node7&name=hdfsbackuprestore_shard1_replica_t4&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 2995732 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 2995733 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node15&name=hdfsbackuprestore_shard2_replica_t12&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 2995836 INFO
(zkCallback-4098-thread-1-processing-n:127.0.0.1:41659_solr)
[n:127.0.0.1:41659_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 2995836 INFO
(zkCallback-4099-thread-1-processing-n:127.0.0.1:46489_solr)
[n:127.0.0.1:46489_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 2995836 INFO
(zkCallback-4099-thread-2-processing-n:127.0.0.1:46489_solr)
[n:127.0.0.1:46489_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 2996982 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 8.0.0
[junit4] 2> 2996982 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 8.0.0
[junit4] 2> 2996984 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 8.0.0
[junit4] 2> 2996988 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 8.0.0
[junit4] 2> 2996997 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_n10] Schema name=minimal
[junit4] 2> 2996998 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_p14] Schema name=minimal
[junit4] 2> 2996998 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 8.0.0
[junit4] 2> 2996999 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 8.0.0
[junit4] 2> 2996999 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_p6] Schema name=minimal
[junit4] 2> 2997001 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 2997009 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_n10' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 2997010 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.m.r.SolrJmxReporter JMX
monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_n10' (registry
'solr.core.hdfsbackuprestore.shard2.replica_n10') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2997010 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 2997011 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 8.0.0
[junit4] 2> 2997011 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_n10] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_n10],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_n10/data/]
[junit4] 2> 2997011 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 8.0.0
[junit4] 2> 2997011 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 2997012 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_p14' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 2997012 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_n2] Schema name=minimal
[junit4] 2> 2997012 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.m.r.SolrJmxReporter JMX
monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_p14' (registry
'solr.core.hdfsbackuprestore.shard2.replica_p14') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2997012 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 2997012 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 2997013 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_p6' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 2997013 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_p14] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_p14],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_p14/data/]
[junit4] 2> 2997015 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_p6' (registry
'solr.core.hdfsbackuprestore.shard1.replica_p6') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2997015 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 2997016 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_p6] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/hdfsbackuprestore_shard1_replica_p6],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/./hdfsbackuprestore_shard1_replica_p6/data/]
[junit4] 2> 2997016 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 2997016 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_n2' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 2997017 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_n2' (registry
'solr.core.hdfsbackuprestore.shard1.replica_n2') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2997017 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 2997017 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_n2] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/hdfsbackuprestore_shard1_replica_n2],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node2/./hdfsbackuprestore_shard1_replica_n2/data/]
[junit4] 2> 2997021 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_t12] Schema name=minimal
[junit4] 2> 2997024 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_n1] Schema name=minimal
[junit4] 2> 2997025 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 2997025 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_t12' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 2997025 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.m.r.SolrJmxReporter JMX
monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_t12' (registry
'solr.core.hdfsbackuprestore.shard2.replica_t12') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2997025 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 2997025 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_t12] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_t12],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/./hdfsbackuprestore_shard2_replica_t12/data/]
[junit4] 2> 2997027 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 2997027 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_n1' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 2997028 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_n8] Schema name=minimal
[junit4] 2> 2997028 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_n1' (registry
'solr.core.hdfsbackuprestore.shard1.replica_n1') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2997028 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 2997028 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_n1] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/hdfsbackuprestore_shard1_replica_n1],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/./hdfsbackuprestore_shard1_replica_n1/data/]
[junit4] 2> 2997031 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_t4] Schema name=minimal
[junit4] 2> 2997031 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 2997031 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_n8' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 2997032 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard2.replica_n8' (registry
'solr.core.hdfsbackuprestore.shard2.replica_n8') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2997032 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 2997032 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_n8] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_n8],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/./hdfsbackuprestore_shard2_replica_n8/data/]
[junit4] 2> 2997034 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 2997034 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_t4' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 2997035 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_t4' (registry
'solr.core.hdfsbackuprestore.shard1.replica_t4') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22017d
[junit4] 2> 2997035 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 2997035 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_t4] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/hdfsbackuprestore_shard1_replica_t4],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.TestHdfsCloudBackupRestore_580AD52F77FA400A-001/tempDir-002/node1/./hdfsbackuprestore_shard1_replica_t4/data/]
[junit4] 2> 2997130 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 2997130 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 2997146 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 2997146 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 2997147 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.s.SolrIndexSearcher Opening
[Searcher@75289a[hdfsbackuprestore_shard1_replica_p6] main]
[junit4] 2> 2997147 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 2997147 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 2997147 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 2997147 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 2997147 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 2997147 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 2997148 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 2997148 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 2997148 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 2997148 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 2997149 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 2997149 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 2997150 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 2997150 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 2997150 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.s.SolrIndexSearcher Opening
[Searcher@bc4804[hdfsbackuprestore_shard2_replica_n10] main]
[junit4] 2> 2997150 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 2997150 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 2997151 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.s.SolrIndexSearcher Opening
[Searcher@135006d[hdfsbackuprestore_shard2_replica_t12] main]
[junit4] 2> 2997151 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 2997151 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 2997152 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 2997152 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 2997152 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 2997152 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 2997152 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 2997152 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 2997152 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening
[Searcher@11a6e1c[hdfsbackuprestore_shard1_replica_n1] main]
[junit4] 2> 2997152 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.s.SolrIndexSearcher Opening
[Searcher@d5c2fe[hdfsbackuprestore_shard2_replica_p14] main]
[junit4] 2> 2997153 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 2997153 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 2997153 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997153 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997153 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997154 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 2997154 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.s.SolrIndexSearcher Opening
[Searcher@8ffc09[hdfsbackuprestore_shard1_replica_n2] main]
[junit4] 2> 2997154 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 2997155 INFO
(searcherExecutor-9371-thread-1-processing-n:127.0.0.1:41659_solr
x:hdfsbackuprestore_shard2_replica_n10 s:shard2 c:hdfsbackuprestore
r:core_node13) [n:127.0.0.1:41659_solr c:hdfsbackuprestore s:shard2
r:core_node13 x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_n10] Registered new searcher
Searcher@bc4804[hdfsbackuprestore_shard2_replica_n10]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 2997155 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581342622615601152
[junit4] 2> 2997155 INFO
(searcherExecutor-9375-thread-1-processing-n:127.0.0.1:46489_solr
x:hdfsbackuprestore_shard2_replica_t12 s:shard2 c:hdfsbackuprestore
r:core_node15) [n:127.0.0.1:46489_solr c:hdfsbackuprestore s:shard2
r:core_node15 x:hdfsbackuprestore_shard2_replica_t12] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_t12] Registered new searcher
Searcher@135006d[hdfsbackuprestore_shard2_replica_t12]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 2997155 INFO
(searcherExecutor-9373-thread-1-processing-n:127.0.0.1:41659_solr
x:hdfsbackuprestore_shard1_replica_p6 s:shard1 c:hdfsbackuprestore
r:core_node9) [n:127.0.0.1:41659_solr c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_p6] Registered new searcher
Searcher@75289a[hdfsbackuprestore_shard1_replica_p6]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 2997155 INFO (qtp30274821-27241) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node15
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581342622615601152
[junit4] 2> 2997155 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.ZkController
hdfsbackuprestore_shard1_replica_p6 starting background replication from leader
[junit4] 2> 2997155 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.ReplicateFromLeader Will start
replication from leader with poll interval: 00:00:03
[junit4] 2> 2997156 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.SolrIndexSearcher Opening
[Searcher@1fe67e1[hdfsbackuprestore_shard2_replica_n8] main]
[junit4] 2> 2997156 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 2997156 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 2997156 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 2997157 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 2997157 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997158 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997158 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997158 INFO (qtp30274821-27166) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581342622618746880
[junit4] 2> 2997159 INFO
(searcherExecutor-9372-thread-1-processing-n:127.0.0.1:41659_solr
x:hdfsbackuprestore_shard2_replica_p14 s:shard2 c:hdfsbackuprestore
r:core_node16) [n:127.0.0.1:41659_solr c:hdfsbackuprestore s:shard2
r:core_node16 x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_p14] Registered new searcher
Searcher@d5c2fe[hdfsbackuprestore_shard2_replica_p14]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 2997159 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.ZkController
hdfsbackuprestore_shard2_replica_p14 starting background replication from leader
[junit4] 2> 2997159 INFO
(searcherExecutor-9374-thread-1-processing-n:127.0.0.1:41659_solr
x:hdfsbackuprestore_shard1_replica_n2 s:shard1 c:hdfsbackuprestore
r:core_node5) [n:127.0.0.1:41659_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_n2] Registered new searcher
Searcher@8ffc09[hdfsbackuprestore_shard1_replica_n2]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 2997159 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.ReplicateFromLeader Will start
replication from leader with poll interval: 00:00:03
[junit4] 2> 2997159 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581342622619795456
[junit4] 2> 2997159 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.ShardLeaderElectionContext
Waiting until we see more replicas up for shard shard2: total=3 found=2
timeoutin=9999ms
[junit4] 2> 2997160 INFO
(searcherExecutor-9377-thread-1-processing-n:127.0.0.1:46489_solr
x:hdfsbackuprestore_shard2_replica_n8 s:shard2 c:hdfsbackuprestore
r:core_node11) [n:127.0.0.1:46489_solr c:hdfsbackuprestore s:shard2
r:core_node11 x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_n8] Registered new searcher
Searcher@1fe67e1[hdfsbackuprestore_shard2_replica_n8]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 2997160 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.h.ReplicationHandler Poll
scheduled at an interval of 3000ms
[junit4] 2> 2997159 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.h.ReplicationHandler Poll
scheduled at an interval of 3000ms
[junit4] 2> 2997160 INFO (qtp933797-27235) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node16
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997160 INFO (qtp933797-27181) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node9
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997160 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 2997161 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 2997161 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997162 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 2997162 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 2997163 INFO
(searcherExecutor-9376-thread-1-processing-n:127.0.0.1:46489_solr
x:hdfsbackuprestore_shard1_replica_n1 s:shard1 c:hdfsbackuprestore
r:core_node3) [n:127.0.0.1:46489_solr c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_n1] Registered new searcher
Searcher@11a6e1c[hdfsbackuprestore_shard1_replica_n1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 2997163 INFO (qtp30274821-27176) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581342622623989760
[junit4] 2> 2997163 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 2997163 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 2997163 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.ShardLeaderElectionContext
Waiting until we see more replicas up for shard shard1: total=3 found=1
timeoutin=9999ms
[junit4] 2> 2997165 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.s.SolrIndexSearcher Opening
[Searcher@58a6c6[hdfsbackuprestore_shard1_replica_t4] main]
[junit4] 2> 2997166 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 2997166 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 2997167 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 2997168 INFO
(searcherExecutor-9378-thread-1-processing-n:127.0.0.1:46489_solr
x:hdfsbackuprestore_shard1_replica_t4 s:shard1 c:hdfsbackuprestore
r:core_node7) [n:127.0.0.1:46489_solr c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_t4] Registered new searcher
Searcher@58a6c6[hdfsbackuprestore_shard1_replica_t4]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 2997168 INFO (qtp30274821-27237) [n:127.0.0.1:46489_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581342622629232640
[junit4] 2> 2997265 INFO
(zkCallback-4099-thread-2-processing-n:127.0.0.1:46489_solr)
[n:127.0.0.1:46489_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 2997265 INFO
(zkCallback-4099-thread-1-processing-n:127.0.0.1:46489_solr)
[n:127.0.0.1:46489_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 2997265 INFO
(zkCallback-4098-thread-1-processing-n:127.0.0.1:41659_solr)
[n:127.0.0.1:41659_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 2997661 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.ShardLeaderElectionContext
Enough replicas found to continue.
[junit4] 2> 2997661 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.ShardLeaderElectionContext I
may be the new leader - try and sync
[junit4] 2> 2997661 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.c.SyncStrategy Sync replicas to
https://127.0.0.1:41659/solr/hdfsbackuprestore_shard2_replica_n10/
[junit4] 2> 2997662 INFO (qtp933797-27233) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard2 r:core_node13
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard2_replica_n10 url=https://127.0.0.1:41659/solr
START
replicas=[https://127.0.0.1:46489/solr/hdfsbackuprestore_shard2_replica_n8/,
https://127.0.0.1:46489/solr/hdfsbackuprestore_shard2_replica_t12/] nUpdates=100
[junit4] 2> 2997664 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.ShardLeaderElectionContext
Enough replicas found to continue.
[junit4] 2> 2997664 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.ShardLeaderElectionContext I may
be the new leader - try and sync
[junit4] 2> 2997664 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SyncStrategy Sync replicas to
https://127.0.0.1:41659/solr/hdfsbackuprestore_shard1_replica_n2/
[junit4] 2> 2997665 INFO (qtp933797-27182) [n:127.0.0.1:41659_solr
c:hdfsbackuprestore
[...truncated too long message...]
rters for registry=solr.collection.multicollection1.shard2.leader, tag=29526414
[junit4] 2> 48625 INFO (coreCloseExecutor-224-thread-6)
[n:127.0.0.1:34185_solr c:foo s:shard1 r:core_node3 x:foo_shard1_replica_n1]
o.a.s.m.SolrMetricManager Closing metric reporters for
registry=solr.collection.foo.shard1.leader, tag=27595489
[junit4] 2> 48628 INFO (coreCloseExecutor-223-thread-1)
[n:127.0.0.1:42451_solr c:collection1 s:shard2 r:core_node4
x:collection1_shard2_replica_n2] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.collection.collection1.shard2.leader, tag=31803725
[junit4] 2> 48629 INFO (coreCloseExecutor-223-thread-8)
[n:127.0.0.1:42451_solr c:foo s:shard1 r:core_node6 x:foo_shard1_replica_n4]
o.a.s.m.SolrMetricManager Closing metric reporters for
registry=solr.core.foo.shard1.replica_n4, tag=10761977
[junit4] 2> 48629 INFO (coreCloseExecutor-223-thread-8)
[n:127.0.0.1:42451_solr c:foo s:shard1 r:core_node6 x:foo_shard1_replica_n4]
o.a.s.m.r.SolrJmxReporter Closing reporter
[org.apache.solr.metrics.reporters.SolrJmxReporter@15ccaa3: rootName =
solr_42451, domain = solr.core.foo.shard1.replica_n4, service url = null, agent
id = null] for registry solr.core.foo.shard1.replica_n4 /
com.codahale.metrics.MetricRegistry@13db825
[junit4] 2> 48639 INFO (coreCloseExecutor-222-thread-2)
[n:127.0.0.1:37509_solr c:localShardsTestColl s:shard3 r:core_node17
x:localShardsTestColl_shard3_replica_n14] o.a.s.m.SolrMetricManager Closing
metric reporters for
registry=solr.collection.localShardsTestColl.shard3.leader, tag=23336351
[junit4] 2> 48639 INFO (coreCloseExecutor-222-thread-3)
[n:127.0.0.1:37509_solr c:localShardsTestColl s:shard1 r:core_node5
x:localShardsTestColl_shard1_replica_n2] o.a.s.m.SolrMetricManager Closing
metric reporters for
registry=solr.collection.localShardsTestColl.shard1.leader, tag=20307006
[junit4] 2> 48639 INFO (coreCloseExecutor-222-thread-4)
[n:127.0.0.1:37509_solr c:localShardsTestColl s:shard2 r:core_node11
x:localShardsTestColl_shard2_replica_n8] o.a.s.m.SolrMetricManager Closing
metric reporters for
registry=solr.collection.localShardsTestColl.shard2.leader, tag=22182700
[junit4] 2> 48639 INFO (coreCloseExecutor-222-thread-5)
[n:127.0.0.1:37509_solr c:2nd_collection s:shard2 r:core_node4
x:2nd_collection_shard2_replica_n2] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.collection.2nd_collection.shard2.leader,
tag=10390740
[junit4] 2> 48639 INFO (coreCloseExecutor-222-thread-7)
[n:127.0.0.1:37509_solr c:foo s:shard1 r:core_node5 x:foo_shard1_replica_n2]
o.a.s.m.SolrMetricManager Closing metric reporters for
registry=solr.collection.foo.shard1.leader, tag=5068030
[junit4] 2> 48641 INFO (jetty-closer-56-thread-3) [ ] o.a.s.c.Overseer
Overseer (id=98833977124061193-127.0.0.1:34185_solr-n_0000000000) closing
[junit4] 2> 48641 INFO
(OverseerStateUpdate-98833977124061193-127.0.0.1:34185_solr-n_0000000000)
[n:127.0.0.1:34185_solr ] o.a.s.c.Overseer Overseer Loop exiting :
127.0.0.1:34185_solr
[junit4] 2> 48642 WARN
(OverseerAutoScalingTriggerThread-98833977124061193-127.0.0.1:34185_solr-n_0000000000)
[n:127.0.0.1:34185_solr ] o.a.s.c.a.OverseerTriggerThread
OverseerTriggerThread woken up but we are closed, exiting.
[junit4] 2> 48644 INFO
(zkCallback-70-thread-2-processing-n:127.0.0.1:37509_solr)
[n:127.0.0.1:37509_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (3) -> (2)
[junit4] 2> 48645 INFO
(zkCallback-71-thread-1-processing-n:127.0.0.1:42451_solr)
[n:127.0.0.1:42451_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (3) -> (1)
[junit4] 2> 48645 INFO
(zkCallback-71-thread-2-processing-n:127.0.0.1:42451_solr)
[n:127.0.0.1:42451_solr ] o.a.s.c.OverseerElectionContext I am going to be
the leader 127.0.0.1:42451_solr
[junit4] 2> 48650 INFO (coreCloseExecutor-223-thread-8)
[n:127.0.0.1:42451_solr c:foo s:shard1 r:core_node6 x:foo_shard1_replica_n4]
o.a.s.m.SolrMetricManager Closing metric reporters for
registry=solr.collection.foo.shard1.leader, tag=10761977
[junit4] 2> 48651 INFO (coreCloseExecutor-223-thread-3)
[n:127.0.0.1:42451_solr c:localShardsTestColl s:shard2 r:core_node13
x:localShardsTestColl_shard2_replica_n10] o.a.s.m.SolrMetricManager Closing
metric reporters for
registry=solr.collection.localShardsTestColl.shard2.leader, tag=19553995
[junit4] 2> 48651 INFO (coreCloseExecutor-223-thread-4)
[n:127.0.0.1:42451_solr c:localShardsTestColl s:shard3 r:core_node18
x:localShardsTestColl_shard3_replica_n16] o.a.s.m.SolrMetricManager Closing
metric reporters for
registry=solr.collection.localShardsTestColl.shard3.leader, tag=5482648
[junit4] 2> 48651 INFO (coreCloseExecutor-223-thread-5)
[n:127.0.0.1:42451_solr c:2nd_collection s:shard1 r:core_node3
x:2nd_collection_shard1_replica_n1] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.collection.2nd_collection.shard1.leader,
tag=14880837
[junit4] 2> 48651 INFO (coreCloseExecutor-223-thread-6)
[n:127.0.0.1:42451_solr c:multicollection1 s:shard1 r:core_node3
x:multicollection1_shard1_replica_n1] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.collection.multicollection1.shard1.leader,
tag=33347516
[junit4] 2> 48651 INFO (coreCloseExecutor-223-thread-7)
[n:127.0.0.1:42451_solr c:multicollection2 s:shard2 r:core_node4
x:multicollection2_shard2_replica_n2] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.collection.multicollection2.shard2.leader,
tag=8885329
[junit4] 2> 50145 WARN
(zkCallback-72-thread-3-processing-n:127.0.0.1:34185_solr)
[n:127.0.0.1:34185_solr ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered,
but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes]
[junit4] 2> 50145 WARN
(zkCallback-70-thread-2-processing-n:127.0.0.1:37509_solr)
[n:127.0.0.1:37509_solr ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered,
but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes]
[junit4] 2> 50145 INFO (jetty-closer-56-thread-3) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@1c7e914{/solr,null,UNAVAILABLE}
[junit4] 2> 50145 INFO (jetty-closer-56-thread-2) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@198aeca{/solr,null,UNAVAILABLE}
[junit4] 2> 50156 WARN
(zkCallback-71-thread-2-processing-n:127.0.0.1:42451_solr)
[n:127.0.0.1:42451_solr ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered,
but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes]
[junit4] 2> 50156 INFO (jetty-closer-56-thread-1) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@127d62e{/solr,null,UNAVAILABLE}
[junit4] 2> 50157 ERROR
(SUITE-CloudSolrClientTest-seed#[9E0A7DC4920EDD85]-worker) [ ]
o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper
server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 50158 INFO
(SUITE-CloudSolrClientTest-seed#[9E0A7DC4920EDD85]-worker) [ ]
o.a.s.c.ZkTestServer connecting to 127.0.0.1:36211 36211
[junit4] 2> 55943 INFO (Thread-66) [ ] o.a.s.c.ZkTestServer connecting
to 127.0.0.1:36211 36211
[junit4] 2> 55944 WARN (Thread-66) [ ] o.a.s.c.ZkTestServer Watch
limit violations:
[junit4] 2> Maximum concurrent create/delete watches above limit:
[junit4] 2>
[junit4] 2> 7 /solr/aliases.json
[junit4] 2> 7 /solr/clusterprops.json
[junit4] 2> 3 /solr/security.json
[junit4] 2> 3 /solr/configs/conf
[junit4] 2>
[junit4] 2> Maximum concurrent data watches above limit:
[junit4] 2>
[junit4] 2> 27 /solr/collections/localShardsTestColl/state.json
[junit4] 2> 21 /solr/collections/foo/state.json
[junit4] 2> 12 /solr/collections/collection1/state.json
[junit4] 2> 12 /solr/collections/2nd_collection/state.json
[junit4] 2> 12 /solr/collections/multicollection2/state.json
[junit4] 2> 12 /solr/collections/multicollection1/state.json
[junit4] 2> 7 /solr/clusterstate.json
[junit4] 2> 6 /solr/collections/overwrite/state.json
[junit4] 2>
[junit4] 2> Maximum concurrent children watches above limit:
[junit4] 2>
[junit4] 2> 7 /solr/live_nodes
[junit4] 2> 7 /solr/collections
[junit4] 2>
[junit4] 2> NOTE: leaving temporary files on disk at:
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-solrj/test/J1/temp/solr.client.solrj.impl.CloudSolrClientTest_9E0A7DC4920EDD85-001
[junit4] 2> NOTE: test params are: codec=Asserting(Lucene70):
{multiDefault=FSTOrd50, a_t=PostingsFormat(name=MockRandom), title_s=FSTOrd50,
id=PostingsFormat(name=LuceneVarGapDocFreqInterval),
text=Lucene50(blocksize=128)},
docValues:{_version_=DocValuesFormat(name=Lucene70),
multiDefault=DocValuesFormat(name=Memory),
title_s=DocValuesFormat(name=Memory),
intDefault=DocValuesFormat(name=Lucene70), id=DocValuesFormat(name=Lucene70),
timestamp=DocValuesFormat(name=Lucene70)}, maxPointsInLeafNode=1208,
maxMBSortInHeap=5.913503147370583, sim=RandomSimilarity(queryNorm=false): {},
locale=nl-BE, timezone=Australia/Victoria
[junit4] 2> NOTE: Linux 4.10.0-33-generic i386/Oracle Corporation
1.8.0_144 (32-bit)/cpus=8,threads=1,free=33375616,total=169459712
[junit4] 2> NOTE: All tests run in this JVM: [AddEvaluatorTest,
SolrExampleStreamingBinaryTest, GetByIdTest, LessThanEqualToEvaluatorTest,
SolrQueryTest, ArrayEvaluatorTest, GreaterThanEqualToEvaluatorTest,
TestSolrJErrorHandling, HyperbolicSineEvaluatorTest, RegressionEvaluatorTest,
TestJsonRecordReader, TestUpdateRequestCodec, TangentEvaluatorTest,
CloudSolrClientBuilderTest, OperationsTest, MergeIndexesEmbeddedTest,
TestFastInputStream, CloudSolrClientTest]
[junit4] Completed [67/145 (1!)] on J1 in 31.65s, 15 tests, 1 error <<<
FAILURES!
[...truncated 42054 lines...]
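The runner's exact "Reproduce with" line for the CloudSolrClientTest error was lost in the truncated output above. Based on the seed, locale and timezone printed in the test params (seed 9E0A7DC4920EDD85, locale=nl-BE, timezone=Australia/Victoria) and the usual randomizedtesting ant properties, a rerun along these lines, presumably from the solr/solrj module, should replay the failing seed; the flags below are the customary ones and may need adjusting:

  ant test -Dtestcase=CloudSolrClientTest -Dtests.seed=9E0A7DC4920EDD85 -Dtests.locale=nl-BE -Dtests.timezone=Australia/Victoria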