Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.x/343/
4 tests failed.
FAILED:
org.apache.solr.cloud.ConcurrentCreateRoutedAliasTest.testConcurrentCreateRoutedAliasMinimal
Error Message:
concurrent alias creation failed java.lang.IllegalStateException: Connection
pool shut down
Stack Trace:
java.lang.AssertionError: concurrent alias creation failed
java.lang.IllegalStateException: Connection pool shut down
at
__randomizedtesting.SeedInfo.seed([963C9F53888D1D35:52A793012592AA0F]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.assertTrue(Assert.java:43)
at org.junit.Assert.assertNull(Assert.java:551)
at
org.apache.solr.cloud.ConcurrentCreateRoutedAliasTest.testConcurrentCreateRoutedAliasMinimal(ConcurrentCreateRoutedAliasTest.java:108)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
FAILED: org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.test
Error Message:
expected:<COMPLETED> but was:<RUNNING>
Stack Trace:
java.lang.AssertionError: expected:<COMPLETED> but was:<RUNNING>
at
__randomizedtesting.SeedInfo.seed([963C9F53888D1D35:1E68A089267170CD]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.failNotEquals(Assert.java:647)
at org.junit.Assert.assertEquals(Assert.java:128)
at org.junit.Assert.assertEquals(Assert.java:147)
at
org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.testBackupAndRestore(AbstractCloudBackupRestoreTestCase.java:289)
at
org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.test(AbstractCloudBackupRestoreTestCase.java:142)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
FAILED:
org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testMetricTrigger
Error Message:
Stack Trace:
java.lang.AssertionError
at
__randomizedtesting.SeedInfo.seed([963C9F53888D1D35:2C30A8DCD765CB7A]:0)
at org.junit.Assert.fail(Assert.java:92)
at org.junit.Assert.assertTrue(Assert.java:43)
at org.junit.Assert.assertNull(Assert.java:551)
at org.junit.Assert.assertNull(Assert.java:562)
at
org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testMetricTrigger(TriggerIntegrationTest.java:1575)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
FAILED: org.apache.solr.cloud.autoscaling.sim.TestLargeCluster.testSearchRate
Error Message:
The trigger did not fire at all
Stack Trace:
java.lang.AssertionError: The trigger did not fire at all
at
__randomizedtesting.SeedInfo.seed([963C9F53888D1D35:CB7481DA474BBB7A]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.assertTrue(Assert.java:43)
at
org.apache.solr.cloud.autoscaling.sim.TestLargeCluster.testSearchRate(TestLargeCluster.java:547)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
Build Log:
[...truncated 11947 lines...]
[junit4] Suite:
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
[junit4] 2> Creating dataDir:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/init-core-data-001
[junit4] 2> 917264 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=4 numCloses=4
[junit4] 2> 917264 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true)
w/NUMERIC_DOCVALUES_SYSPROP=true
[junit4] 2> 917301 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via:
@org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
[junit4] 2> 917302 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks:
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 1> Formatting using clusterid: testClusterID
[junit4] 2> 919775 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.h.m.i.MetricsConfig Cannot locate configuration: tried
hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
[junit4] 2> 919993 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 920000 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 920156 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log Extract
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs
to ./temp/Jetty_localhost_51411_hdfs____vvszqa/webapp
[junit4] 2> 926743 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:51411
[junit4] 2> 929257 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 929259 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 929359 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log Extract
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
to ./temp/Jetty_localhost_47051_datanode____.ll8ncg/webapp
[junit4] 2> 935682 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:47051
[junit4] 2> 936844 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 936850 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 937262 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log Extract
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
to ./temp/Jetty_localhost_38677_datanode____.josk94/webapp
[junit4] 2> 938918 ERROR (DataNode:
[[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-001/hdfsBaseDir/data/data1/,
[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-001/hdfsBaseDir/data/data2/]]
heartbeating to localhost/127.0.0.1:32939) [ ]
o.a.h.h.s.d.DirectoryScanner
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1
ms/sec. Assuming default value of 1000
[junit4] 2> 938995 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x3e857f447dce87: from storage
DS-85f543fc-ae18-4654-8ea7-457d58eb022b node
DatanodeRegistration(127.0.0.1:56062,
datanodeUuid=3efa0368-b7ff-4cc5-9a36-8589c6421d1d, infoPort=59133,
infoSecurePort=0, ipcPort=40369,
storageInfo=lv=-56;cid=testClusterID;nsid=1396815523;c=0), blocks: 0,
hasStaleStorage: true, processing time: 0 msecs
[junit4] 2> 938995 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x3e857f447dce87: from storage
DS-d2ca3cce-9ac1-4d21-af71-0b436165d624 node
DatanodeRegistration(127.0.0.1:56062,
datanodeUuid=3efa0368-b7ff-4cc5-9a36-8589c6421d1d, infoPort=59133,
infoSecurePort=0, ipcPort=40369,
storageInfo=lv=-56;cid=testClusterID;nsid=1396815523;c=0), blocks: 0,
hasStaleStorage: false, processing time: 0 msecs
[junit4] 2> 941957 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:38677
[junit4] 2> 947319 ERROR (DataNode:
[[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-001/hdfsBaseDir/data/data3/,
[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-001/hdfsBaseDir/data/data4/]]
heartbeating to localhost/127.0.0.1:32939) [ ]
o.a.h.h.s.d.DirectoryScanner
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1
ms/sec. Assuming default value of 1000
[junit4] 2> 947477 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x3e85813cbddc44: from storage
DS-f018afea-3b42-41f5-8833-bcfa764f588a node
DatanodeRegistration(127.0.0.1:45986,
datanodeUuid=e7c0d423-4302-4585-805e-0a6cf4aef7dc, infoPort=48285,
infoSecurePort=0, ipcPort=42023,
storageInfo=lv=-56;cid=testClusterID;nsid=1396815523;c=0), blocks: 0,
hasStaleStorage: true, processing time: 16 msecs
[junit4] 2> 947477 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x3e85813cbddc44: from storage
DS-5e3846f9-fd92-46cd-81a4-2b9391328869 node
DatanodeRegistration(127.0.0.1:45986,
datanodeUuid=e7c0d423-4302-4585-805e-0a6cf4aef7dc, infoPort=48285,
infoSecurePort=0, ipcPort=42023,
storageInfo=lv=-56;cid=testClusterID;nsid=1396815523;c=0), blocks: 0,
hasStaleStorage: false, processing time: 0 msecs
[junit4] 2> 949656 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002
[junit4] 2> 949656 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 949657 INFO (Thread-875) [ ] o.a.s.c.ZkTestServer client
port:0.0.0.0/0.0.0.0:0
[junit4] 2> 949657 INFO (Thread-875) [ ] o.a.s.c.ZkTestServer Starting
server
[junit4] 2> 949674 ERROR (Thread-875) [ ] o.a.z.s.ZooKeeperServer
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action
on ERROR or SHUTDOWN server state changes
[junit4] 2> 949758 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.c.ZkTestServer start zk server on port:46976
[junit4] 2> 950061 INFO (zkConnectionManagerCallback-158-thread-1) [ ]
o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 950134 INFO (jetty-launcher-155-thread-1) [ ]
o.e.j.s.Server jetty-9.4.8.v20171121, build timestamp:
2017-11-22T00:27:37+03:00, git hash: 82b8fb23f757335bb3329d540ce37a2a2615f0a8
[junit4] 2> 950146 INFO (jetty-launcher-155-thread-2) [ ]
o.e.j.s.Server jetty-9.4.8.v20171121, build timestamp:
2017-11-22T00:27:37+03:00, git hash: 82b8fb23f757335bb3329d540ce37a2a2615f0a8
[junit4] 2> 950178 INFO (jetty-launcher-155-thread-2) [ ]
o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 950178 INFO (jetty-launcher-155-thread-2) [ ]
o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 950178 INFO (jetty-launcher-155-thread-2) [ ]
o.e.j.s.session Scavenging every 660000ms
[junit4] 2> 950179 INFO (jetty-launcher-155-thread-2) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@3f6699ad{/solr,null,AVAILABLE}
[junit4] 2> 950179 INFO (jetty-launcher-155-thread-2) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@d12c520{HTTP/1.1,[http/1.1]}{127.0.0.1:56822}
[junit4] 2> 950179 INFO (jetty-launcher-155-thread-2) [ ]
o.e.j.s.Server Started @984505ms
[junit4] 2> 950179 INFO (jetty-launcher-155-thread-2) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr,
hostPort=56822}
[junit4] 2> 950180 ERROR (jetty-launcher-155-thread-2) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 950180 INFO (jetty-launcher-155-thread-2) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.3.0
[junit4] 2> 950180 INFO (jetty-launcher-155-thread-2) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 950180 INFO (jetty-launcher-155-thread-2) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 950180 INFO (jetty-launcher-155-thread-2) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2018-01-30T21:05:27.580Z
[junit4] 2> 950182 INFO (jetty-launcher-155-thread-1) [ ]
o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 950182 INFO (jetty-launcher-155-thread-1) [ ]
o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 950182 INFO (jetty-launcher-155-thread-1) [ ]
o.e.j.s.session Scavenging every 660000ms
[junit4] 2> 950254 INFO (jetty-launcher-155-thread-1) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@75df0b21{/solr,null,AVAILABLE}
[junit4] 2> 950256 INFO (jetty-launcher-155-thread-1) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@49915d65{HTTP/1.1,[http/1.1]}{127.0.0.1:45076}
[junit4] 2> 950256 INFO (jetty-launcher-155-thread-1) [ ]
o.e.j.s.Server Started @984582ms
[junit4] 2> 950256 INFO (jetty-launcher-155-thread-1) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr,
hostPort=45076}
[junit4] 2> 950257 ERROR (jetty-launcher-155-thread-1) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 950292 INFO (jetty-launcher-155-thread-1) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.3.0
[junit4] 2> 950292 INFO (jetty-launcher-155-thread-1) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 950292 INFO (jetty-launcher-155-thread-1) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 950292 INFO (jetty-launcher-155-thread-1) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2018-01-30T21:05:27.692Z
[junit4] 2> 950736 INFO (zkConnectionManagerCallback-162-thread-1) [ ]
o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 950910 INFO (jetty-launcher-155-thread-1) [ ]
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
[junit4] 2> 950944 INFO (jetty-launcher-155-thread-1) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 950962 INFO (jetty-launcher-155-thread-1) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:46976/solr
[junit4] 2> 950963 INFO (zkConnectionManagerCallback-160-thread-1) [ ]
o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 951022 INFO (jetty-launcher-155-thread-2) [ ]
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
[junit4] 2> 951036 INFO (jetty-launcher-155-thread-2) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 951110 INFO (jetty-launcher-155-thread-2) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:46976/solr
[junit4] 2> 951181 INFO (zkConnectionManagerCallback-168-thread-1) [ ]
o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 951216 INFO (zkConnectionManagerCallback-170-thread-1) [ ]
o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 951514 INFO
(zkConnectionManagerCallback-172-thread-1-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 951670 INFO
(zkConnectionManagerCallback-174-thread-1-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 956631 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 956633 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.c.OverseerElectionContext I am going to be
the leader 127.0.0.1:56822_solr
[junit4] 2> 956687 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.c.Overseer Overseer
(id=73210912353026054-127.0.0.1:56822_solr-n_0000000000) starting
[junit4] 2> 956985 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:56822_solr
[junit4] 2> 957069 INFO
(zkCallback-173-thread-1-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 957633 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.c.b.r.BackupRepositoryFactory Added backup
repository with configuration params {type = repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://localhost:32939/solr,solr.hdfs.confdir=}}
[junit4] 2> 957633 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.c.b.r.BackupRepositoryFactory Default
configuration for backup repository is with configuration params {type =
repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://localhost:32939/solr,solr.hdfs.confdir=}}
[junit4] 2> 958444 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 958710 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 958720 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 958728 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 958728 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 958741 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:45076_solr
[junit4] 2> 958742 INFO
(zkCallback-173-thread-1-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (1) -> (2)
[junit4] 2> 958808 INFO (jetty-launcher-155-thread-2)
[n:127.0.0.1:56822_solr ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node2/.
[junit4] 2> 959161 INFO
(zkCallback-171-thread-1-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (1) -> (2)
[junit4] 2> 960062 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.c.b.r.BackupRepositoryFactory Added backup
repository with configuration params {type = repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://localhost:32939/solr,solr.hdfs.confdir=}}
[junit4] 2> 960065 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.c.b.r.BackupRepositoryFactory Default
configuration for backup repository is with configuration params {type =
repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://localhost:32939/solr,solr.hdfs.confdir=}}
[junit4] 2> 960734 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 961038 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 961038 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 961069 INFO (jetty-launcher-155-thread-1)
[n:127.0.0.1:45076_solr ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node1/.
[junit4] 2> 961771 INFO (zkConnectionManagerCallback-180-thread-1) [ ]
o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 961940 INFO (zkConnectionManagerCallback-184-thread-1) [ ]
o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 961946 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 961949 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[963C9F53888D1D35]-worker) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:46976/solr ready
[junit4] 2> 962521 INFO
(TEST-TestHdfsCloudBackupRestore.test-seed#[963C9F53888D1D35]) [ ]
o.a.s.SolrTestCaseJ4 ###Starting test
[junit4] 2> 962583 INFO (qtp527904507-1865) [n:127.0.0.1:56822_solr ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
replicationFactor=2&collection.configName=conf1&router.name=implicit&version=2&pullReplicas=1&shards=shard1,shard2&property.customKey=customValue&maxShardsPerNode=3&router.field=shard_s&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=2&action=CREATE&tlogReplicas=0&wt=javabin
and sendToOCPQueue=true
[junit4] 2> 962618 INFO
(OverseerThreadFactory-775-thread-1-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.a.c.CreateCollectionCmd Create collection
hdfsbackuprestore
[junit4] 2> 962619 WARN
(OverseerThreadFactory-775-thread-1-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.a.c.CreateCollectionCmd Specified number
of replicas of 3 on collection hdfsbackuprestore is higher than the number of
Solr instances currently live or live and part of your createNodeSet(2). It's
unusual to run two replica of the same slice on the same Solr-instance.
[junit4] 2> 962993 INFO
(OverseerStateUpdate-73210912353026054-127.0.0.1:56822_solr-n_0000000000)
[n:127.0.0.1:56822_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_n1",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:45076/solr",
[junit4] 2> "type":"NRT",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 963025 INFO
(OverseerStateUpdate-73210912353026054-127.0.0.1:56822_solr-n_0000000000)
[n:127.0.0.1:56822_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_n2",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:56822/solr",
[junit4] 2> "type":"NRT",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 963059 INFO
(OverseerStateUpdate-73210912353026054-127.0.0.1:56822_solr-n_0000000000)
[n:127.0.0.1:56822_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_p4",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:45076/solr",
[junit4] 2> "type":"PULL",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 963125 INFO
(OverseerStateUpdate-73210912353026054-127.0.0.1:56822_solr-n_0000000000)
[n:127.0.0.1:56822_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_n6",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:56822/solr",
[junit4] 2> "type":"NRT",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 963193 INFO
(OverseerStateUpdate-73210912353026054-127.0.0.1:56822_solr-n_0000000000)
[n:127.0.0.1:56822_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_n8",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:45076/solr",
[junit4] 2> "type":"NRT",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 963329 INFO
(OverseerStateUpdate-73210912353026054-127.0.0.1:56822_solr-n_0000000000)
[n:127.0.0.1:56822_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_p10",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:56822/solr",
[junit4] 2> "type":"PULL",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 963722 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 963722 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647
transient cores
[junit4] 2> 963770 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=PULL&property.customKey=customValue&coreNodeName=core_node12&name=hdfsbackuprestore_shard2_replica_p10&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 963777 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647
transient cores
[junit4] 2> 964183 INFO
(zkCallback-173-thread-1-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 964203 INFO
(zkCallback-173-thread-2-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 964204 INFO
(zkCallback-171-thread-1-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 964330 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node11&name=hdfsbackuprestore_shard2_replica_n8&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 964335 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_shard1_replica_n2&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 964357 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=PULL&property.customKey=customValue&coreNodeName=core_node7&name=hdfsbackuprestore_shard1_replica_p4&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 964398 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node9&name=hdfsbackuprestore_shard2_replica_n6&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 964504 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.3.0
[junit4] 2> 964538 INFO
(zkCallback-173-thread-3-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 964554 INFO
(zkCallback-173-thread-2-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 964554 INFO
(zkCallback-171-thread-1-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 964587 INFO
(zkCallback-171-thread-2-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 964686 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_n1] Schema name=minimal
[junit4] 2> 964823 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 964823 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_n1' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 964824 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_n1' (registry
'solr.core.hdfsbackuprestore.shard1.replica_n1') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 964824 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 964824 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_n1] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node1/hdfsbackuprestore_shard1_replica_n1],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node1/./hdfsbackuprestore_shard1_replica_n1/data/]
[junit4] 2> 965288 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.3.0
[junit4] 2> 965450 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_p10] Schema name=minimal
[junit4] 2> 965497 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 965497 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_p10' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 965498 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.m.r.SolrJmxReporter JMX
monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_p10' (registry
'solr.core.hdfsbackuprestore.shard2.replica_p10') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 965499 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 965499 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_p10] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_p10],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_p10/data/]
[junit4] 2> 966125 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.3.0
[junit4] 2> 966163 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.3.0
[junit4] 2> 966168 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.3.0
[junit4] 2> 966212 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.3.0
[junit4] 2> 966727 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_n6] Schema name=minimal
[junit4] 2> 966790 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 966790 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_n6' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 966791 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard2.replica_n6' (registry
'solr.core.hdfsbackuprestore.shard2.replica_n6') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 966791 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 966792 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_n6] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_n6],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_n6/data/]
[junit4] 2> 966990 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_p4] Schema name=minimal
[junit4] 2> 967014 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_n2] Schema name=minimal
[junit4] 2> 967085 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 967085 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_p4' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 967086 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 967086 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_n2' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 967097 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_p4' (registry
'solr.core.hdfsbackuprestore.shard1.replica_p4') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 967097 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 967097 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_p4] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node1/hdfsbackuprestore_shard1_replica_p4],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node1/./hdfsbackuprestore_shard1_replica_p4/data/]
[junit4] 2> 967269 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_n2' (registry
'solr.core.hdfsbackuprestore.shard1.replica_n2') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 967269 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 967269 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_n2] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node2/hdfsbackuprestore_shard1_replica_n2],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node2/./hdfsbackuprestore_shard1_replica_n2/data/]
[junit4] 2> 967777 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_n8] Schema name=minimal
[junit4] 2> 968010 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 968010 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_n8' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 968011 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard2.replica_n8' (registry
'solr.core.hdfsbackuprestore.shard2.replica_n8') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@52a4746c
[junit4] 2> 968011 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 968011 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_n8] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_n8],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_963C9F53888D1D35-001/tempDir-002/node1/./hdfsbackuprestore_shard2_replica_n8/data/]
[junit4] 2> 969016 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 969016 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 969019 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 969019 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 969045 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 969045 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 969047 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening
[Searcher@ab1621a[hdfsbackuprestore_shard1_replica_n1] main]
[junit4] 2> 969071 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 969081 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 969158 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 969161 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.s.SolrIndexSearcher Opening
[Searcher@6419745b[hdfsbackuprestore_shard2_replica_p10] main]
[junit4] 2> 969259 INFO
(searcherExecutor-780-thread-1-processing-n:127.0.0.1:45076_solr
x:hdfsbackuprestore_shard1_replica_n1 s:shard1 c:hdfsbackuprestore
r:core_node3) [n:127.0.0.1:45076_solr c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_n1] Registered new searcher
Searcher@ab1621a[hdfsbackuprestore_shard1_replica_n1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 969318 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1591052962856173568
[junit4] 2> 969341 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 969342 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 969343 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 970013 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.c.ZkController
hdfsbackuprestore_shard2_replica_p10 starting background replication from leader
[junit4] 2> 970015 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.c.ReplicateFromLeader Will start
replication from leader with poll interval: 00:00:03
[junit4] 2> 970079 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.h.ReplicationHandler Poll
scheduled at an interval of 3000ms
[junit4] 2> 970079 INFO (qtp527904507-1861) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 970081 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext
Waiting until we see more replicas up for shard shard1: total=2 found=1
timeoutin=9999ms
[junit4] 2> 970129 INFO
(searcherExecutor-781-thread-1-processing-n:127.0.0.1:56822_solr
x:hdfsbackuprestore_shard2_replica_p10 s:shard2 c:hdfsbackuprestore
r:core_node12) [n:127.0.0.1:56822_solr c:hdfsbackuprestore s:shard2
r:core_node12 x:hdfsbackuprestore_shard2_replica_p10] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_p10] Registered new searcher
Searcher@6419745b[hdfsbackuprestore_shard2_replica_p10]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 970145 INFO
(zkCallback-173-thread-2-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 970145 INFO
(zkCallback-173-thread-3-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 970149 INFO
(zkCallback-171-thread-2-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 970149 INFO
(zkCallback-171-thread-1-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 970193 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 970193 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 970617 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 970617 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 970619 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 970619 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 970657 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.s.SolrIndexSearcher Opening
[Searcher@23ae362d[hdfsbackuprestore_shard2_replica_n6] main]
[junit4] 2> 970659 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 970660 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 970673 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 970673 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1591052964276994048
[junit4] 2> 970677 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.s.SolrIndexSearcher Opening
[Searcher@7f2f4e9c[hdfsbackuprestore_shard1_replica_p4] main]
[junit4] 2> 970695 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 970695 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 970697 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 970713 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 970714 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 970716 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 970716 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 970845 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.SolrIndexSearcher Opening
[Searcher@1414bf[hdfsbackuprestore_shard2_replica_n8] main]
[junit4] 2> 970846 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 970846 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 970862 INFO
(searcherExecutor-782-thread-1-processing-n:127.0.0.1:56822_solr
x:hdfsbackuprestore_shard2_replica_n6 s:shard2 c:hdfsbackuprestore
r:core_node9) [n:127.0.0.1:56822_solr c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_n6] Registered new searcher
Searcher@23ae362d[hdfsbackuprestore_shard2_replica_n6]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 970863 INFO
(searcherExecutor-783-thread-1-processing-n:127.0.0.1:45076_solr
x:hdfsbackuprestore_shard1_replica_p4 s:shard1 c:hdfsbackuprestore
r:core_node7) [n:127.0.0.1:45076_solr c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_p4] Registered new searcher
Searcher@7f2f4e9c[hdfsbackuprestore_shard1_replica_p4]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 971467 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 971467 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 971468 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 971469 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 971470 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 971470 INFO (qtp1066405676-1870) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1591052965112709120
[junit4] 2> 971489 INFO
(searcherExecutor-787-thread-1-processing-n:127.0.0.1:45076_solr
x:hdfsbackuprestore_shard2_replica_n8 s:shard2 c:hdfsbackuprestore
r:core_node11) [n:127.0.0.1:45076_solr c:hdfsbackuprestore s:shard2
r:core_node11 x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_n8] Registered new searcher
Searcher@1414bf[hdfsbackuprestore_shard2_replica_n8]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 971620 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.s.SolrIndexSearcher Opening
[Searcher@554bd268[hdfsbackuprestore_shard1_replica_n2] main]
[junit4] 2> 971621 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.c.ZkController
hdfsbackuprestore_shard1_replica_p4 starting background replication from leader
[junit4] 2> 971622 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.c.ReplicateFromLeader Will start
replication from leader with poll interval: 00:00:03
[junit4] 2> 971635 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.h.ReplicationHandler Poll
scheduled at an interval of 3000ms
[junit4] 2> 971635 INFO (qtp1066405676-1877) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node7
x:hdfsbackuprestore_shard1_replica_p4] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 971665 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 971673 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 971674 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 971675 INFO (qtp527904507-1863) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1591052965327667200
[junit4] 2> 971786 INFO
(searcherExecutor-784-thread-1-processing-n:127.0.0.1:56822_solr
x:hdfsbackuprestore_shard1_replica_n2 s:shard1 c:hdfsbackuprestore
r:core_node5) [n:127.0.0.1:56822_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_n2] Registered new searcher
Searcher@554bd268[hdfsbackuprestore_shard1_replica_n2]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 971904 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.ShardLeaderElectionContext
Enough replicas found to continue.
[junit4] 2> 971905 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.ShardLeaderElectionContext I may
be the new leader - try and sync
[junit4] 2> 971905 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:56822/solr/hdfsbackuprestore_shard2_replica_n6/
[junit4] 2> 971932 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard2_replica_n6 url=http://127.0.0.1:56822/solr START
replicas=[http://127.0.0.1:45076/solr/hdfsbackuprestore_shard2_replica_n8/]
nUpdates=100
[junit4] 2> 971978 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext
Enough replicas found to continue.
[junit4] 2> 971978 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may
be the new leader - try and sync
[junit4] 2> 971978 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:45076/solr/hdfsbackuprestore_shard1_replica_n1/
[junit4] 2> 971979 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard1_replica_n1 url=http://127.0.0.1:45076/solr START
replicas=[http://127.0.0.1:56822/solr/hdfsbackuprestore_shard1_replica_n2/]
nUpdates=100
[junit4] 2> 972002 INFO
(zkCallback-171-thread-1-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 972003 INFO
(zkCallback-173-thread-3-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 972003 INFO
(zkCallback-173-thread-2-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 972073 INFO
(zkCallback-171-thread-3-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 972149 INFO (qtp527904507-1938) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.c.S.Request
[hdfsbackuprestore_shard1_replica_n2] webapp=/solr path=/get
params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2}
status=0 QTime=19
[junit4] 2> 972298 INFO (qtp1066405676-1872) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard2 r:core_node11
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n8] webapp=/solr path=/get
params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2}
status=0 QTime=124
[junit4] 2> 972301 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard2_replica_n6 url=http://127.0.0.1:56822/solr DONE.
We have no versions. sync failed.
[junit4] 2> 972325 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.SyncStrategy Leader's attempt to
sync with shard failed, moving to the next candidate
[junit4] 2> 972325 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.ShardLeaderElectionContext We
failed sync, but we have no versions - we can't sync in that case - we were
active before, so become leader anyway
[junit4] 2> 972325 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.ShardLeaderElectionContext Found
all replicas participating in election, clear LIR
[junit4] 2> 972373 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard1_replica_n1 url=http://127.0.0.1:45076/solr DONE.
We have no versions. sync failed.
[junit4] 2> 972373 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Leader's attempt to
sync with shard failed, moving to the next candidate
[junit4] 2> 972373 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext We
failed sync, but we have no versions - we can't sync in that case - we were
active before, so become leader anyway
[junit4] 2> 972373 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found
all replicas participating in election, clear LIR
[junit4] 2> 973709 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am
the new leader:
http://127.0.0.1:45076/solr/hdfsbackuprestore_shard1_replica_n1/ shard1
[junit4] 2> 973763 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.ShardLeaderElectionContext I am
the new leader:
http://127.0.0.1:56822/solr/hdfsbackuprestore_shard2_replica_n6/ shard2
[junit4] 2> 973892 INFO
(zkCallback-173-thread-1-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 973893 INFO
(zkCallback-173-thread-3-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 973893 INFO
(zkCallback-171-thread-3-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 973893 INFO
(zkCallback-171-thread-1-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 973950 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no
recovery necessary
[junit4] 2> 973951 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.c.ZkController I am the leader, no
recovery necessary
[junit4] 2> 973951 INFO (indexFetcher-797-thread-1)
[n:127.0.0.1:56822_solr c:hdfsbackuprestore s:shard2 r:core_node12
x:hdfsbackuprestore_shard2_replica_p10] o.a.s.h.IndexFetcher Replica core_node9
is leader but it's state is down, skipping replication
[junit4] 2> 973961 INFO (qtp1066405676-1871) [n:127.0.0.1:45076_solr
c:hdfsbackuprestore s:shard1 r:core_node3
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin}
status=0 QTime=10238
[junit4] 2> 973963 INFO (qtp527904507-1867) [n:127.0.0.1:56822_solr
c:hdfsbackuprestore s:shard2 r:core_node9
x:hdfsbackuprestore_shard2_replica_n6] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node9&name=hdfsbackuprestore_shard2_replica_n6&action=CREATE&numShards=2&shard=shard2&wt=javabin}
status=0 QTime=9565
[junit4] 2> 974070 INFO
(zkCallback-171-thread-3-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 974071 INFO
(zkCallback-171-thread-1-processing-n:127.0.0.1:45076_solr)
[n:127.0.0.1:45076_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 974074 INFO
(zkCallback-173-thread-1-processing-n:127.0.0.1:56822_solr)
[n:127.0.0.1:56822_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating...
[...truncated too long message...]
ve/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptor-kerberos-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-admin-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-authn-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-authz-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-changelog-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-collective-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-event-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-exception-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-journal-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-normalization-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-operational-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-referral-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-schema-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-subtree-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-interceptors-trigger-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-jdbm-partition-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apached
s-jdbm1-2.0.0-M2.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-kerberos-codec-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-ldif-partition-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-mavibot-partition-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-protocol-kerberos-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-protocol-ldap-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-protocol-shared-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/apacheds-xdbm-partition-2.0.0-M15.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/api-all-1.0.0-M20.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/bcprov-jdk15on-1.54.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/byte-buddy-1.6.2.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/commons-collections-3.2.2.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/commons-math3-3.6.1.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/ehcache-core-2.4.4.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/hadoop-common-2.7.4-tests.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/hadoop-hdfs-2.7.4-tests.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/hadoop-minikdc-2.7.4.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/htrace-core-3.2.0-incubating.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/jersey-core-1.9.jar:/x1/jenkins/jenkins-slave/workspace/Lucen
e-Solr-Tests-7.x/solr/core/test-lib/jersey-server-1.9.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/jetty-6.1.26.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/jetty-sslengine-6.1.26.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/jetty-util-6.1.26.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/mina-core-2.0.0-M5.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/mockito-core-2.6.2.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/netty-all-4.0.36.Final.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/test-lib/objenesis-2.5.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/build/analysis/icu/lucene-analyzers-icu-7.3.0-SNAPSHOT.jar:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/contrib/solr-analysis-extras/classes/java:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/build/queryparser/classes/test:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/build/backward-codecs/classes/test:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/contrib/analysis-extras/lib/icu4j-60.2.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-launcher.jar:/x1/jenkins/.ant/lib/ivy-2.4.0.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-junit.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-apache-log4j.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-junit4.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-jai.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-javamail.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-apache-bsf.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-commons-net.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-antlr.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-jsch.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-apache-oro.ja
r:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-commons-logging.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-netrexx.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-testutil.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-jdepend.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-apache-bcel.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-apache-xalan2.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-apache-resolver.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-jmf.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-apache-regexp.jar:/home/jenkins/tools/ant/apache-ant-1.8.4/lib/ant-swing.jar:/usr/local/asfpackages/java/jdk1.8.0_144/lib/tools.jar:/x1/jenkins/.ivy2/cache/com.carrotsearch.randomizedtesting/junit4-ant/jars/junit4-ant-2.5.3.jar
com.carrotsearch.ant.tasks.junit4.slave.SlaveMainSafe -eventsfile
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/temp/junit4-J2-20180130_204902_6387734699034853613405.events
@/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/temp/junit4-J2-20180130_204902_638738875226015830346.suites
-stdin
[junit4] ERROR: JVM J2 ended with an exception: Forked process returned with
error code: 1. Very likely a JVM crash. See process stdout at:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/temp/junit4-J2-20180130_204902_6384939566263072388628.sysout
See process stderr at:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/temp/junit4-J2-20180130_204902_6381482280639190651496.syserr
[junit4] at
com.carrotsearch.ant.tasks.junit4.JUnit4.executeSlave(JUnit4.java:1519)
[junit4] at
com.carrotsearch.ant.tasks.junit4.JUnit4.access$000(JUnit4.java:126)
[junit4] at
com.carrotsearch.ant.tasks.junit4.JUnit4$2.call(JUnit4.java:982)
[junit4] at
com.carrotsearch.ant.tasks.junit4.JUnit4$2.call(JUnit4.java:979)
[junit4] at java.util.concurrent.FutureTask.run(FutureTask.java:266)
[junit4] at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
[junit4] at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[junit4] at java.lang.Thread.run(Thread.java:748)
BUILD FAILED
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/build.xml:836: The
following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/build.xml:780: The
following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/build.xml:59: The
following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build.xml:262:
The following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/common-build.xml:556:
The following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/common-build.xml:1513:
The following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/common-build.xml:1038:
At least one slave process threw an exception, first: Forked process returned
with error code: 1. Very likely a JVM crash. See process stdout at:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/temp/junit4-J1-20180130_204902_6397246301557099666982.sysout
See process stderr at:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/temp/junit4-J1-20180130_204902_6397477289918787376473.syserr
Total time: 440 minutes 33 seconds
Build step 'Invoke Ant' marked build as failure
Archiving artifacts
[Fast Archiver] No artifacts from Lucene-Solr-Tests-7.x #342 to compare, so
performing full copy of artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]