Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.x/183/
1 test failed.
FAILED: org.apache.solr.cloud.TestHdfsCloudBackupRestore.test
Error Message:
expected:<{shard1=0, shard2=3}> but was:<{shard1=0, shard2=0}>
Stack Trace:
java.lang.AssertionError: expected:<{shard1=0, shard2=3}> but was:<{shard1=0, shard2=0}>
    at __randomizedtesting.SeedInfo.seed([1A52D2B00C0736A5:9206ED6AA2FB5B5D]:0)
    at org.junit.Assert.fail(Assert.java:93)
    at org.junit.Assert.failNotEquals(Assert.java:647)
    at org.junit.Assert.assertEquals(Assert.java:128)
    at org.junit.Assert.assertEquals(Assert.java:147)
    at org.apache.solr.cloud.AbstractCloudBackupRestoreTestCase.testBackupAndRestore(AbstractCloudBackupRestoreTestCase.java:285)
    at org.apache.solr.cloud.AbstractCloudBackupRestoreTestCase.test(AbstractCloudBackupRestoreTestCase.java:136)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at java.lang.Thread.run(Thread.java:748)
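Note: the expected/actual maps suggest the assertion at AbstractCloudBackupRestoreTestCase.java:285 compares per-shard document counts captured before the backup against the counts seen on the restored collection; shard2 held 3 documents before the backup but reports 0 after the restore. The snippet below is a minimal, self-contained reconstruction of that kind of check using only the values from the failure message; the class and variable names are illustrative, not the actual test source.

import static org.junit.Assert.assertEquals;

import java.util.HashMap;
import java.util.Map;

// Hypothetical reconstruction of the failing per-shard doc-count comparison.
// Only the map values are taken from the failure message above.
public class ShardDocCountCheck {
  public static void main(String[] args) {
    Map<String, Integer> beforeBackup = new HashMap<>();
    beforeBackup.put("shard1", 0);
    beforeBackup.put("shard2", 3);   // shard2 held 3 docs when the backup was taken

    Map<String, Integer> afterRestore = new HashMap<>();
    afterRestore.put("shard1", 0);
    afterRestore.put("shard2", 0);   // restored shard2 reports no docs

    // Fails with: expected:<{shard1=0, shard2=3}> but was:<{shard1=0, shard2=0}>
    assertEquals(beforeBackup, afterRestore);
  }
}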
Build Log:
[...truncated 12061 lines...]
[junit4] Suite: org.apache.solr.cloud.TestHdfsCloudBackupRestore
[junit4] 2> 1221422 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks:
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> Creating dataDir:
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/init-core-data-001
[junit4] 2> 1221423 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=1 numCloses=1
[junit4] 2> 1221440 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true)
w/NUMERIC_DOCVALUES_SYSPROP=false
[junit4] 2> 1221441 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (true) via:
@org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
[junit4] 1> Formatting using clusterid: testClusterID
[junit4] 2> 1221616 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.h.m.i.MetricsConfig Cannot locate configuration: tried
hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
[junit4] 2> 1221667 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 1221668 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 1221810 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log Extract
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs
to ./temp/Jetty_lucene2.us.west_apache_org_39242_hdfs____.lmp18s/webapp
[junit4] 2> 1223386 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@lucene2-us-west.apache.org:39242
[junit4] 2> 1224005 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 1224005 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 1224097 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log Extract
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
to ./temp/Jetty_localhost_35942_datanode____.ca2284/webapp
[junit4] 2> 1225046 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:35942
[junit4] 2> 1225224 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 1225225 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log jetty-6.1.26
[junit4] 2> 1225262 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log Extract
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
to ./temp/Jetty_localhost_40134_datanode____.6f56mb/webapp
[junit4] 2> 1225464 ERROR (DataNode:
[[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-001/hdfsBaseDir/data/data1/,
[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-001/hdfsBaseDir/data/data2/]]
heartbeating to lucene2-us-west.apache.org/127.0.0.1:43133) [ ]
o.a.h.h.s.d.DirectoryScanner
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1
ms/sec. Assuming default value of 1000
[junit4] 2> 1225517 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x1cf5c5e94521b6: from storage
DS-051ac79c-83e4-4b11-ba0d-e9500bdddff4 node
DatanodeRegistration(127.0.0.1:44003,
datanodeUuid=fca6dce5-6985-4811-9b92-edf8040f3e0f, infoPort=46286,
infoSecurePort=0, ipcPort=41438,
storageInfo=lv=-56;cid=testClusterID;nsid=254902223;c=0), blocks: 0,
hasStaleStorage: true, processing time: 0 msecs
[junit4] 2> 1225517 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x1cf5c5e94521b6: from storage
DS-8a34a8d3-b413-4700-abfd-5b1a74cb71cc node
DatanodeRegistration(127.0.0.1:44003,
datanodeUuid=fca6dce5-6985-4811-9b92-edf8040f3e0f, infoPort=46286,
infoSecurePort=0, ipcPort=41438,
storageInfo=lv=-56;cid=testClusterID;nsid=254902223;c=0), blocks: 0,
hasStaleStorage: false, processing time: 0 msecs
[junit4] 2> 1226612 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:40134
[junit4] 2> 1227475 ERROR (DataNode:
[[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-001/hdfsBaseDir/data/data3/,
[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-001/hdfsBaseDir/data/data4/]]
heartbeating to lucene2-us-west.apache.org/127.0.0.1:43133) [ ]
o.a.h.h.s.d.DirectoryScanner
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1
ms/sec. Assuming default value of 1000
[junit4] 2> 1227501 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x1cf5c6608fe121: from storage
DS-98bcdee0-a66f-413d-bc94-d1f8be5116cf node
DatanodeRegistration(127.0.0.1:33596,
datanodeUuid=618d2365-3371-42a3-9dfa-32f051628dd0, infoPort=43743,
infoSecurePort=0, ipcPort=45511,
storageInfo=lv=-56;cid=testClusterID;nsid=254902223;c=0), blocks: 0,
hasStaleStorage: true, processing time: 0 msecs
[junit4] 2> 1227501 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x1cf5c6608fe121: from storage
DS-b7143b2c-9597-4d25-a630-6bb600c802b1 node
DatanodeRegistration(127.0.0.1:33596,
datanodeUuid=618d2365-3371-42a3-9dfa-32f051628dd0, infoPort=43743,
infoSecurePort=0, ipcPort=45511,
storageInfo=lv=-56;cid=testClusterID;nsid=254902223;c=0), blocks: 0,
hasStaleStorage: false, processing time: 0 msecs
[junit4] 2> 1227620 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.c.TestHdfsCloudBackupRestore The NameNode is in SafeMode - Solr will wait
5 seconds and try again.
[junit4] 2> 1232631 WARN
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.c.TestHdfsCloudBackupRestore The NameNode is in SafeMode - Solr will wait
5 seconds and try again.
[junit4] 2> 1237676 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002
[junit4] 2> 1237676 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 1237690 INFO (Thread-1751) [ ] o.a.s.c.ZkTestServer client
port:0.0.0.0/0.0.0.0:0
[junit4] 2> 1237690 INFO (Thread-1751) [ ] o.a.s.c.ZkTestServer
Starting server
[junit4] 2> 1237755 ERROR (Thread-1751) [ ] o.a.z.s.ZooKeeperServer
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action
on ERROR or SHUTDOWN server state changes
[junit4] 2> 1237804 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.c.ZkTestServer start zk server on port:38810
[junit4] 2> 1237930 INFO (jetty-launcher-572-thread-1) [ ]
o.e.j.s.Server jetty-9.3.20.v20170531
[junit4] 2> 1237959 INFO (jetty-launcher-572-thread-1) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@39a136dd{/solr,null,AVAILABLE}
[junit4] 2> 1237959 INFO (jetty-launcher-572-thread-1) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@f84d064{HTTP/1.1,[http/1.1]}{127.0.0.1:33650}
[junit4] 2> 1237959 INFO (jetty-launcher-572-thread-1) [ ]
o.e.j.s.Server Started @1250812ms
[junit4] 2> 1237959 INFO (jetty-launcher-572-thread-1) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr,
hostPort=33650}
[junit4] 2> 1237993 ERROR (jetty-launcher-572-thread-1) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 1237993 INFO (jetty-launcher-572-thread-1) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.2.0
[junit4] 2> 1237993 INFO (jetty-launcher-572-thread-1) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 1237993 INFO (jetty-launcher-572-thread-1) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config
dir: null
[junit4] 2> 1237993 INFO (jetty-launcher-572-thread-1) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-10-15T04:37:12.934Z
[junit4] 2> 1238011 INFO (jetty-launcher-572-thread-2) [ ]
o.e.j.s.Server jetty-9.3.20.v20170531
[junit4] 2> 1238030 INFO (jetty-launcher-572-thread-2) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@5144a051{/solr,null,AVAILABLE}
[junit4] 2> 1238030 INFO (jetty-launcher-572-thread-2) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@3ca679a{HTTP/1.1,[http/1.1]}{127.0.0.1:46340}
[junit4] 2> 1238030 INFO (jetty-launcher-572-thread-2) [ ]
o.e.j.s.Server Started @1250883ms
[junit4] 2> 1238030 INFO (jetty-launcher-572-thread-2) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr,
hostPort=46340}
[junit4] 2> 1238030 ERROR (jetty-launcher-572-thread-2) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 1238030 INFO (jetty-launcher-572-thread-2) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.2.0
[junit4] 2> 1238031 INFO (jetty-launcher-572-thread-2) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 1238031 INFO (jetty-launcher-572-thread-2) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config
dir: null
[junit4] 2> 1238031 INFO (jetty-launcher-572-thread-2) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-10-15T04:37:12.972Z
[junit4] 2> 1238065 INFO (jetty-launcher-572-thread-2) [ ]
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
[junit4] 2> 1238085 INFO (jetty-launcher-572-thread-2) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 1238093 INFO (jetty-launcher-572-thread-1) [ ]
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
[junit4] 2> 1238096 INFO (jetty-launcher-572-thread-1) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 1238113 INFO (jetty-launcher-572-thread-2) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38810/solr
[junit4] 2> 1238137 INFO (jetty-launcher-572-thread-1) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38810/solr
[junit4] 2> 1238646 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 1238647 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.c.OverseerElectionContext I am going to be
the leader 127.0.0.1:33650_solr
[junit4] 2> 1238648 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.c.Overseer Overseer
(id=98831055757770757-127.0.0.1:33650_solr-n_0000000000) starting
[junit4] 2> 1238725 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:33650_solr
[junit4] 2> 1238771 INFO
(zkCallback-583-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 1238929 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.c.b.r.BackupRepositoryFactory Added backup
repository with configuration params {type = repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:43133/solr,solr.hdfs.confdir=}}
[junit4] 2> 1238929 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.c.b.r.BackupRepositoryFactory Default
configuration for backup repository is with configuration params {type =
repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:43133/solr,solr.hdfs.confdir=}}
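Note: for context on how the "hdfs" repository registered above is exercised, the sketch below shows one way a collection backup and restore against such a repository can be requested through SolrJ's Collections API. It is a hedged illustration only; the collection and backup names are made up, and the test's own helper code may drive the API differently.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

// Illustrative only: back up a collection to the "hdfs" repository registered above
// (location=/backup on the test HDFS cluster), then restore it under a new name.
public class HdfsBackupRestoreSketch {
  public static void backupAndRestore(SolrClient client) throws Exception {
    CollectionAdminRequest.Backup backup =
        CollectionAdminRequest.backupCollection("hdfsbackuprestore", "mytestbackup");
    backup.setRepositoryName("hdfs");   // the HdfsBackupRepository registered in solr.xml
    backup.setLocation("/backup");      // matches the repository's location param
    backup.process(client);

    CollectionAdminRequest.Restore restore =
        CollectionAdminRequest.restoreCollection("hdfsbackuprestore_restored", "mytestbackup");
    restore.setRepositoryName("hdfs");
    restore.setLocation("/backup");
    restore.process(client);
  }
}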
[junit4] 2> 1239020 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 1239021 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 1239023 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:46340_solr
[junit4] 2> 1239109 INFO
(zkCallback-584-thread-1-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (1) -> (2)
[junit4] 2> 1239126 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1239132 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1239132 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1239133 INFO (jetty-launcher-572-thread-1)
[n:127.0.0.1:33650_solr ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node1/.
[junit4] 2> 1239171 INFO
(zkCallback-583-thread-2-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (1) -> (2)
[junit4] 2> 1239324 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.c.b.r.BackupRepositoryFactory Added backup
repository with configuration params {type = repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:43133/solr,solr.hdfs.confdir=}}
[junit4] 2> 1239337 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.c.b.r.BackupRepositoryFactory Default
configuration for backup repository is with configuration params {type =
repository,name = hdfs,class =
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes =
{name=hdfs,
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args =
{location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:43133/solr,solr.hdfs.confdir=}}
[junit4] 2> 1239434 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1239579 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1239580 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1239630 INFO (jetty-launcher-572-thread-2)
[n:127.0.0.1:46340_solr ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node2/.
[junit4] 2> 1239931 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 1239932 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38810/solr ready
[junit4] 2> 1240831 INFO
(TEST-TestHdfsCloudBackupRestore.test-seed#[1A52D2B00C0736A5]) [ ]
o.a.s.SolrTestCaseJ4 ###Starting test
[junit4] 2> 1240834 INFO (qtp1120325267-4889) [n:127.0.0.1:46340_solr
] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
replicationFactor=1&collection.configName=conf1&router.name=implicit&version=2&pullReplicas=0&shards=shard1,shard2&property.customKey=customValue&maxShardsPerNode=2&router.field=shard_s&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=1&action=CREATE&tlogReplicas=1&wt=javabin
and sendToOCPQueue=true
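Note: the CREATE parameters logged above (implicit router on shard_s, shards shard1,shard2, nrtReplicas=1, tlogReplicas=1, pullReplicas=0, maxShardsPerNode=2, autoAddReplicas=true, property.customKey=customValue) correspond roughly to the SolrJ request sketched below. This is a hedged illustration of the Collections API call, not the test's own code; the custom property from the logged params is noted only in a comment.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

// Illustrative equivalent of the logged CREATE request.
// property.customKey=customValue from the logged params is omitted here.
public class CreateBackupRestoreCollection {
  public static void create(SolrClient client) throws Exception {
    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollectionWithImplicitRouter(
        "hdfsbackuprestore",      // collection name
        "conf1",                  // configset
        "shard1,shard2",          // implicit-router shard names
        1,                        // NRT replicas per shard
        1,                        // TLOG replicas per shard
        0);                       // PULL replicas per shard
    create.setRouterField("shard_s");
    create.setMaxShardsPerNode(2);
    create.setAutoAddReplicas(true);
    create.process(client);
  }
}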
[junit4] 2> 1240868 INFO
(OverseerThreadFactory-1624-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.CreateCollectionCmd Create collection
hdfsbackuprestore
[junit4] 2> 1241058 INFO
(OverseerStateUpdate-98831055757770757-127.0.0.1:33650_solr-n_0000000000)
[n:127.0.0.1:33650_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_n1",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:46340/solr",
[junit4] 2> "type":"NRT"}
[junit4] 2> 1241059 INFO
(OverseerStateUpdate-98831055757770757-127.0.0.1:33650_solr-n_0000000000)
[n:127.0.0.1:33650_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"hdfsbackuprestore_shard1_replica_t2",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:33650/solr",
[junit4] 2> "type":"TLOG"}
[junit4] 2> 1241060 INFO
(OverseerStateUpdate-98831055757770757-127.0.0.1:33650_solr-n_0000000000)
[n:127.0.0.1:33650_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_n3",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:46340/solr",
[junit4] 2> "type":"NRT"}
[junit4] 2> 1241074 INFO
(OverseerStateUpdate-98831055757770757-127.0.0.1:33650_solr-n_0000000000)
[n:127.0.0.1:33650_solr ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"hdfsbackuprestore",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"hdfsbackuprestore_shard2_replica_t7",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"http://127.0.0.1:33650/solr",
[junit4] 2> "type":"TLOG"}
[junit4] 2> 1241339 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
] o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node4&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 1241339 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for
2147483647 transient cores
[junit4] 2> 1241340 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
] o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node8&name=hdfsbackuprestore_shard2_replica_t7&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 1241340 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for
2147483647 transient cores
[junit4] 2> 1241341 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
] o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_shard1_replica_t2&action=CREATE&numShards=2&shard=shard1&wt=javabin
[junit4] 2> 1241465 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
] o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node6&name=hdfsbackuprestore_shard2_replica_n3&action=CREATE&numShards=2&shard=shard2&wt=javabin
[junit4] 2> 1241648 INFO
(zkCallback-583-thread-2-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1241648 INFO
(zkCallback-583-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1241664 INFO
(zkCallback-584-thread-1-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1241664 INFO
(zkCallback-583-thread-3-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1241701 INFO
(zkCallback-584-thread-2-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1241725 INFO
(zkCallback-584-thread-3-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1242516 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.2.0
[junit4] 2> 1242591 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_n1] Schema name=minimal
[junit4] 2> 1242629 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.2.0
[junit4] 2> 1242642 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 1242642 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_n1' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 1242643 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_n1' (registry
'solr.core.hdfsbackuprestore.shard1.replica_n1') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1242643 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 1242643 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_n1] ] Opening new SolrCore at
[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node2/hdfsbackuprestore_shard1_replica_n1],
dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node2/./hdfsbackuprestore_shard1_replica_n1/data/]
[junit4] 2> 1242647 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_n3] Schema name=minimal
[junit4] 2> 1242661 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 1242661 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_n3' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 1242662 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard2.replica_n3' (registry
'solr.core.hdfsbackuprestore.shard2.replica_n3') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1242662 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 1242662 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_n3] ] Opening new SolrCore at
[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_n3],
dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_n3/data/]
[junit4] 2> 1242698 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.2.0
[junit4] 2> 1242701 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.SolrConfig Using Lucene
MatchVersion: 7.2.0
[junit4] 2> 1242883 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard2_replica_t7] Schema name=minimal
[junit4] 2> 1242918 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.IndexSchema
[hdfsbackuprestore_shard1_replica_t2] Schema name=minimal
[junit4] 2> 1242918 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 1242918 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard2_replica_t7' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 1242918 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard2.replica_t7' (registry
'solr.core.hdfsbackuprestore.shard2.replica_t7') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1242918 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 1242918 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard2_replica_t7] ] Opening new SolrCore at
[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_t7],
dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node1/./hdfsbackuprestore_shard2_replica_t7/data/]
[junit4] 2> 1242919 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.IndexSchema Loaded schema
minimal/1.1 with uniqueid field id
[junit4] 2> 1242919 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.CoreContainer Creating SolrCore
'hdfsbackuprestore_shard1_replica_t2' using configuration from collection
hdfsbackuprestore, trusted=true
[junit4] 2> 1242920 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.m.r.SolrJmxReporter JMX monitoring
for 'solr.core.hdfsbackuprestore.shard1.replica_t2' (registry
'solr.core.hdfsbackuprestore.shard1.replica_t2') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@14749a65
[junit4] 2> 1242920 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 1242920 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.SolrCore
[[hdfsbackuprestore_shard1_replica_t2] ] Opening new SolrCore at
[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node1/hdfsbackuprestore_shard1_replica_t2],
dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node1/./hdfsbackuprestore_shard1_replica_t2/data/]
[junit4] 2> 1243193 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 1243193 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1243194 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 1243195 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 1243196 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.SolrIndexSearcher Opening
[Searcher@536ffc2e[hdfsbackuprestore_shard2_replica_n3] main]
[junit4] 2> 1243230 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 1243230 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 1243231 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 1243231 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581296897933443072
[junit4] 2> 1243283 INFO
(searcherExecutor-1630-thread-1-processing-n:127.0.0.1:46340_solr
x:hdfsbackuprestore_shard2_replica_n3 s:shard2 c:hdfsbackuprestore
r:core_node6) [n:127.0.0.1:46340_solr c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_n3] Registered new searcher
Searcher@536ffc2e[hdfsbackuprestore_shard2_replica_n3]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1243319 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 1243319 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1243320 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 1243320 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 1243342 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext
Waiting until we see more replicas up for shard shard2: total=2 found=1
timeoutin=9999ms
[junit4] 2> 1243392 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening
[Searcher@6fce71af[hdfsbackuprestore_shard1_replica_n1] main]
[junit4] 2> 1243393 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 1243393 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 1243394 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 1243394 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581296898104360960
[junit4] 2> 1243503 INFO
(zkCallback-583-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243505 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext
Waiting until we see more replicas up for shard shard1: total=2 found=1
timeoutin=9999ms
[junit4] 2> 1243518 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 1243518 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1243519 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 1243519 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 1243521 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.SolrIndexSearcher Opening
[Searcher@5e1cb8a4[hdfsbackuprestore_shard1_replica_t2] main]
[junit4] 2> 1243539 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 1243539 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 1243540 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 1243540 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581296898257453056
[junit4] 2> 1243577 INFO
(searcherExecutor-1629-thread-1-processing-n:127.0.0.1:46340_solr
x:hdfsbackuprestore_shard1_replica_n1 s:shard1 c:hdfsbackuprestore
r:core_node4) [n:127.0.0.1:46340_solr c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_n1] Registered new searcher
Searcher@6fce71af[hdfsbackuprestore_shard1_replica_n1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1243578 INFO
(searcherExecutor-1632-thread-1-processing-n:127.0.0.1:33650_solr
x:hdfsbackuprestore_shard1_replica_t2 s:shard1 c:hdfsbackuprestore
r:core_node5) [n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_t2] Registered new searcher
Searcher@5e1cb8a4[hdfsbackuprestore_shard1_replica_t2]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1243592 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.u.UpdateHandler Using UpdateLog
implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 1243592 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.u.UpdateLog Initializing
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1243593 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.u.CommitTracker Hard AutoCommit:
disabled
[junit4] 2> 1243593 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.u.CommitTracker Soft AutoCommit:
disabled
[junit4] 2> 1243594 INFO
(zkCallback-583-thread-3-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243594 INFO
(zkCallback-583-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243594 INFO
(zkCallback-584-thread-2-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243594 INFO
(zkCallback-584-thread-1-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243596 INFO
(zkCallback-584-thread-3-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243665 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.s.SolrIndexSearcher Opening
[Searcher@56a59427[hdfsbackuprestore_shard2_replica_t7] main]
[junit4] 2> 1243671 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.r.ManagedResourceStorage
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 1243672 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.r.ManagedResourceStorage Loaded
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 1243672 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 1243673 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.u.UpdateLog Could not find max
version in index or recent updates, using new clock 1581296898395865088
[junit4] 2> 1243723 INFO
(searcherExecutor-1631-thread-1-processing-n:127.0.0.1:33650_solr
x:hdfsbackuprestore_shard2_replica_t7 s:shard2 c:hdfsbackuprestore
r:core_node8) [n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_t7] Registered new searcher
Searcher@56a59427[hdfsbackuprestore_shard2_replica_t7]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1243724 INFO
(zkCallback-584-thread-2-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243724 INFO
(zkCallback-584-thread-1-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243724 INFO
(zkCallback-584-thread-3-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243724 INFO
(zkCallback-583-thread-3-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243724 INFO
(zkCallback-583-thread-4-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243724 INFO
(zkCallback-583-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1243848 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext
Enough replicas found to continue.
[junit4] 2> 1243848 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext I may
be the new leader - try and sync
[junit4] 2> 1243848 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:46340/solr/hdfsbackuprestore_shard2_replica_n3/
[junit4] 2> 1243849 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard2_replica_n3 url=http://127.0.0.1:46340/solr START
replicas=[http://127.0.0.1:33650/solr/hdfsbackuprestore_shard2_replica_t7/]
nUpdates=100
[junit4] 2> 1243921 INFO (qtp1306448183-4879) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_t7] webapp=/solr path=/get
params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2}
status=0 QTime=19
[junit4] 2> 1244011 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext
Enough replicas found to continue.
[junit4] 2> 1244011 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may
be the new leader - try and sync
[junit4] 2> 1244011 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:46340/solr/hdfsbackuprestore_shard1_replica_n1/
[junit4] 2> 1244011 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard1_replica_n1 url=http://127.0.0.1:46340/solr START
replicas=[http://127.0.0.1:33650/solr/hdfsbackuprestore_shard1_replica_t2/]
nUpdates=100
[junit4] 2> 1244026 INFO (qtp1306448183-4880) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.S.Request
[hdfsbackuprestore_shard1_replica_t2] webapp=/solr path=/get
params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2}
status=0 QTime=14
[junit4] 2> 1244173 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard2_replica_n3 url=http://127.0.0.1:46340/solr DONE.
We have no versions. sync failed.
[junit4] 2> 1244173 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SyncStrategy Leader's attempt to
sync with shard failed, moving to the next candidate
[junit4] 2> 1244173 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext We
failed sync, but we have no versions - we can't sync in that case - we were
active before, so become leader anyway
[junit4] 2> 1244173 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext Found
all replicas participating in election, clear LIR
[junit4] 2> 1244175 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext I am
the new leader:
http://127.0.0.1:46340/solr/hdfsbackuprestore_shard2_replica_n3/ shard2
[junit4] 2> 1244319 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync:
core=hdfsbackuprestore_shard1_replica_n1 url=http://127.0.0.1:46340/solr DONE.
We have no versions. sync failed.
[junit4] 2> 1244319 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Leader's attempt to
sync with shard failed, moving to the next candidate
[junit4] 2> 1244319 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext We
failed sync, but we have no versions - we can't sync in that case - we were
active before, so become leader anyway
[junit4] 2> 1244319 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found
all replicas participating in election, clear LIR
[junit4] 2> 1244321 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am
the new leader:
http://127.0.0.1:46340/solr/hdfsbackuprestore_shard1_replica_n1/ shard1
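For context on the PeerSync exchange above: each would-be leader asks the other replicas of its shard for their most recent update versions through the realtime-get handler, which is what the path=/get ... getVersions=100 requests are. Below is a minimal, hypothetical sketch of issuing that same request by hand; the host, port and core name are taken from this particular run, wt=json is substituted for javabin so the output is readable, and only the JDK is assumed. It is an illustration, not Solr's own PeerSync code.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class GetVersionsProbe {
        public static void main(String[] args) throws Exception {
            // Core URL as it appears in the log for this run; adjust for a real cluster.
            String core = "http://127.0.0.1:33650/solr/hdfsbackuprestore_shard2_replica_t7";
            // Same parameters PeerSync sends, but asking for JSON instead of javabin.
            URL url = new URL(core + "/get?distrib=false&getVersions=100&fingerprint=false&wt=json");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            try (BufferedReader in = new BufferedReader(
                    new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = in.readLine()) != null) {
                    // Expect a list of recent update versions; empty for a freshly created core.
                    System.out.println(line);
                }
            } finally {
                conn.disconnect();
            }
        }
    }

An empty version list on both sides is why the log above reports "We have no versions. sync failed." and the candidates become leaders anyway.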
[junit4] 2> 1244506 INFO
(zkCallback-583-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244520 INFO
(zkCallback-583-thread-3-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244520 INFO
(zkCallback-583-thread-4-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244522 INFO
(zkCallback-584-thread-2-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244522 INFO
(zkCallback-584-thread-3-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244522 INFO
(zkCallback-584-thread-1-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244538 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no
recovery necessary
[junit4] 2> 1244560 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ZkController I am the leader, no
recovery necessary
[junit4] 2> 1244561 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node4&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin}
status=0 QTime=3222
[junit4] 2> 1244562 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node6&name=hdfsbackuprestore_shard2_replica_n3&action=CREATE&numShards=2&shard=shard2&wt=javabin}
status=0 QTime=3097
[junit4] 2> 1244613 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.ZkController
hdfsbackuprestore_shard1_replica_t2 starting background replication from leader
[junit4] 2> 1244613 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.ReplicateFromLeader Will start
replication from leader with poll interval: 00:00:03
[junit4] 2> 1244648 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.ReplicationHandler Poll
scheduled at an interval of 3000ms
[junit4] 2> 1244648 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 1244649 INFO (qtp1306448183-4875) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_shard1_replica_t2&action=CREATE&numShards=2&shard=shard1&wt=javabin}
status=0 QTime=3307
[junit4] 2> 1244795 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.ZkController
hdfsbackuprestore_shard2_replica_t7 starting background replication from leader
[junit4] 2> 1244795 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.ReplicateFromLeader Will start
replication from leader with poll interval: 00:00:03
[junit4] 2> 1244961 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.ReplicationHandler Poll
scheduled at an interval of 3000ms
[junit4] 2> 1244961 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.ReplicationHandler Commits will
be reserved for 10000ms.
[junit4] 2> 1244962 INFO (qtp1306448183-4876) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node8&name=hdfsbackuprestore_shard2_replica_t7&action=CREATE&numShards=2&shard=shard2&wt=javabin}
status=0 QTime=3622
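The two *_t cores registered above are TLOG replicas: they copy the leader's index by polling its /replication handler rather than indexing every document themselves, and the log reports the poll interval as 3 seconds ("Poll scheduled at an interval of 3000ms"). A toy sketch of that polling pattern follows, using only the JDK and a placeholder poll body; it is not Solr's ReplicateFromLeader/IndexFetcher code.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class LeaderPollSketch {
        public static void main(String[] args) {
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
            Runnable poll = () -> {
                // Placeholder for the real work: ask the leader for its index version
                // (command=indexversion) and pull new segment files if it has moved ahead.
                System.out.println("polling leader for index version...");
            };
            // 3000 ms matches the interval reported by ReplicationHandler in this log;
            // Solr's own scheduling internals may differ from this sketch.
            scheduler.scheduleWithFixedDelay(poll, 0, 3000, TimeUnit.MILLISECONDS);
        }
    }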
[junit4] 2> 1244990 INFO
(zkCallback-584-thread-1-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244990 INFO
(zkCallback-584-thread-3-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244990 INFO
(zkCallback-584-thread-2-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244998 INFO
(zkCallback-583-thread-3-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244998 INFO
(zkCallback-583-thread-4-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244998 INFO
(zkCallback-583-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1244999 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Updated masterUrl
to http://127.0.0.1:46340/solr/hdfsbackuprestore_shard2_replica_n3/
[junit4] 2> 1245015 INFO (qtp1120325267-4893) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0
QTime=0
[junit4] 2> 1245015 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Master's
generation: 1
[junit4] 2> 1245015 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Master's version: 0
[junit4] 2> 1245015 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Slave's generation:
1
[junit4] 2> 1245015 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Slave's version: 0
[junit4] 2> 1245015 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher New index in
Master. Deleting mine...
[junit4] 2> 1245162 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.s.SolrIndexSearcher Opening
[Searcher@6c3d22e6[hdfsbackuprestore_shard2_replica_t7] main]
[junit4] 2> 1245163 INFO
(searcherExecutor-1631-thread-1-processing-n:127.0.0.1:33650_solr
x:hdfsbackuprestore_shard2_replica_t7 s:shard2 c:hdfsbackuprestore
r:core_node8) [n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_t7] Registered new searcher
Searcher@6c3d22e6[hdfsbackuprestore_shard2_replica_t7]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1245173 INFO (qtp1120325267-4889) [n:127.0.0.1:46340_solr
] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most
30 seconds. Check all shard replicas
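The CollectionsHandler entry above waits up to 30 seconds for every replica of the new collection to report itself active. A rough, hypothetical way to watch the same thing from outside is to poll the Collections API's CLUSTERSTATUS action until no replica is still down or recovering; the node address is the one from this run and the string check is deliberately crude rather than a real JSON parse.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class WaitForActiveSketch {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://127.0.0.1:46340/solr/admin/collections"
                    + "?action=CLUSTERSTATUS&collection=hdfsbackuprestore&wt=json");
            long deadline = System.currentTimeMillis() + 30_000; // "at most 30 seconds"
            while (System.currentTimeMillis() < deadline) {
                StringBuilder body = new StringBuilder();
                HttpURLConnection conn = (HttpURLConnection) url.openConnection();
                try (BufferedReader in = new BufferedReader(
                        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = in.readLine()) != null) {
                        body.append(line);
                    }
                } finally {
                    conn.disconnect();
                }
                // Crude check: done once no replica is reported as down or recovering.
                String state = body.toString();
                if (!state.contains("\"state\":\"down\"") && !state.contains("\"state\":\"recovering\"")) {
                    System.out.println("all replicas active");
                    return;
                }
                Thread.sleep(1000);
            }
            System.out.println("timed out waiting for the collection to become active");
        }
    }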
[junit4] 2> 1245384 INFO
(zkCallback-583-thread-1-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1245384 INFO
(zkCallback-583-thread-4-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1245384 INFO
(zkCallback-583-thread-3-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1245400 INFO
(zkCallback-584-thread-2-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1245400 INFO
(zkCallback-584-thread-3-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1245400 INFO
(zkCallback-584-thread-1-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/hdfsbackuprestore/state.json] for collection
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1245929 INFO (indexFetcher-1650-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.IndexFetcher Updated masterUrl
to http://127.0.0.1:46340/solr/hdfsbackuprestore_shard1_replica_n1/
[junit4] 2> 1245930 INFO (qtp1120325267-4893) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request
[hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/replication
params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0
QTime=0
[junit4] 2> 1245930 INFO (indexFetcher-1650-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.IndexFetcher Master's
generation: 1
[junit4] 2> 1245930 INFO (indexFetcher-1650-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.IndexFetcher Master's version: 0
[junit4] 2> 1245930 INFO (indexFetcher-1650-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.IndexFetcher Slave's generation:
1
[junit4] 2> 1245930 INFO (indexFetcher-1650-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.IndexFetcher Slave's version: 0
[junit4] 2> 1245930 INFO (indexFetcher-1650-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.IndexFetcher New index in
Master. Deleting mine...
[junit4] 2> 1245932 INFO (indexFetcher-1650-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.SolrIndexSearcher Opening
[Searcher@147ca3c[hdfsbackuprestore_shard1_replica_t2] main]
[junit4] 2> 1245987 INFO
(searcherExecutor-1632-thread-1-processing-n:127.0.0.1:33650_solr
x:hdfsbackuprestore_shard1_replica_t2 s:shard1 c:hdfsbackuprestore
r:core_node5) [n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.SolrCore
[hdfsbackuprestore_shard1_replica_t2] Registered new searcher
Searcher@147ca3c[hdfsbackuprestore_shard1_replica_t2]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1246185 INFO (qtp1120325267-4889) [n:127.0.0.1:46340_solr
] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={replicationFactor=1&collection.configName=conf1&router.name=implicit&version=2&pullReplicas=0&shards=shard1,shard2&property.customKey=customValue&maxShardsPerNode=2&router.field=shard_s&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=1&action=CREATE&tlogReplicas=1&wt=javabin}
status=0 QTime=5350
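The CREATE request above spells out the whole collection layout in one line: two implicitly routed shards (router.name=implicit, shards=shard1,shard2, router.field=shard_s), one NRT plus one TLOG replica per shard with no PULL replicas, and conf1 as the config set. A hypothetical, trimmed-down way to issue the same call from plain Java is sketched below; the property.customKey and javabin parameters from the logged request are left out, and the node address is the one from this run.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class CreateCollectionSketch {
        public static void main(String[] args) throws Exception {
            // Parameters copied from the /admin/collections request in the log above.
            String params = "action=CREATE&name=hdfsbackuprestore"
                    + "&collection.configName=conf1"
                    + "&router.name=implicit&shards=shard1,shard2&router.field=shard_s"
                    + "&nrtReplicas=1&tlogReplicas=1&pullReplicas=0"
                    + "&maxShardsPerNode=2&autoAddReplicas=true&wt=json";
            URL url = new URL("http://127.0.0.1:46340/solr/admin/collections?" + params);
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            try (BufferedReader in = new BufferedReader(
                    new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = in.readLine()) != null) {
                    System.out.println(line); // responseHeader status 0 on success, as in the log
                }
            } finally {
                conn.disconnect();
            }
        }
    }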
[junit4] 2> 1246305 INFO (qtp1306448183-4881) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.u.p.LogUpdateProcessorFactory
[hdfsbackuprestore_shard2_replica_t7] webapp=/solr path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:46340/solr/hdfsbackuprestore_shard2_replica_n3/&wt=javabin&version=2}{add=[0
(1581296901071831040), 1 (1581296901124259840), 2 (1581296901125308416)]} 0 10
[junit4] HEARTBEAT J2 PID([email protected]):
2017-10-15T04:37:46, stalled for 63.7s at:
ForceLeaderTest.testLastPublishedStateIsActive
[junit4] 2> 1246311 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.p.LogUpdateProcessorFactory
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/update
params={update.distrib=TOLEADER&distrib.from=http://127.0.0.1:46340/solr/hdfsbackuprestore_shard1_replica_n1/&wt=javabin&version=2}{add=[0
(1581296901071831040), 1 (1581296901124259840), 2 (1581296901125308416)]} 0 86
[junit4] 2> 1246315 INFO (qtp1120325267-4890) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory
[hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/update
params={wt=javabin&version=2}{add=[0, 1, 2]} 0 93
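Because the collection uses the implicit router with router.field=shard_s, each document carries a shard_s field whose value names the target shard; that is why the adds logged above arrive on the shard1 core but are forwarded TOLEADER to the shard2 leader. A hypothetical sketch of sending such documents as a JSON array to /update follows; the field values are illustrative, not the test's actual documents.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class ImplicitRouteIndexSketch {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://127.0.0.1:46340/solr/hdfsbackuprestore/update?commit=true&wt=json");
            // shard_s picks the shard under the implicit router; "shard2" matches the adds above.
            String docs = "[{\"id\":\"0\",\"shard_s\":\"shard2\"},"
                        + "{\"id\":\"1\",\"shard_s\":\"shard2\"},"
                        + "{\"id\":\"2\",\"shard_s\":\"shard2\"}]";
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "application/json");
            try (OutputStream out = conn.getOutputStream()) {
                out.write(docs.getBytes(StandardCharsets.UTF_8));
            }
            try (BufferedReader in = new BufferedReader(
                    new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = in.readLine()) != null) {
                    System.out.println(line);
                }
            } finally {
                conn.disconnect();
            }
        }
    }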
[junit4] 2> 1246406 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start
commit{_version_=1581296901262671872,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1246406 INFO (qtp1120325267-4893) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 start
commit{_version_=1581296901262671872,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1246413 INFO (qtp1306448183-4882) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.TestInjection Start waiting for
replica in sync with leader
[junit4] 2> 1246415 INFO (qtp1306448183-4879) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.u.TestInjection Start waiting for
replica in sync with leader
[junit4] 2> 1246681 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 No
uncommitted changes. Skipping IW.commit.
[junit4] 2> 1246681 INFO (qtp1120325267-4893) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.SolrIndexWriter Calling
setCommitData with IW:org.apache.solr.update.SolrIndexWriter@5eed693c
commitCommandVersion:1581296901262671872
[junit4] 2> 1246881 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2
end_commit_flush
[junit4] 2> 1246881 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory
[hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:46340/solr/hdfsbackuprestore_shard2_replica_n3/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 475
[junit4] 2> 1246934 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=37
[junit4] 2> 1246935 INFO (qtp1120325267-4889) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard1 r:core_node4
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request
[hdfsbackuprestore_shard1_replica_n1] webapp=/solr path=/replication
params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=274
[junit4] 2> 1246935 INFO (qtp1306448183-4882) [n:127.0.0.1:33650_solr
c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.p.LogUpdateProcessorFactory
[hdfsbackuprestore_shard1_replica_t2] webapp=/solr path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:46340/solr/hdfsbackuprestore_shard2_replica_n3/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 522
[junit4] 2> 1247449 INFO (qtp1120325267-4891) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
[junit4] 2> 1247482 INFO (qtp1120325267-4893) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.SolrIndexSearcher Opening
[Searcher@342ee91a[hdfsbackuprestore_shard2_replica_n3] main]
[junit4] 2> 1247484 INFO
(searcherExecutor-1630-thread-1-processing-n:127.0.0.1:46340_solr
x:hdfsbackuprestore_shard2_replica_n3 s:shard2 c:hdfsbackuprestore
r:core_node6) [n:127.0.0.1:46340_solr c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrCore
[hdfsbackuprestore_shard2_replica_n3] Registered new searcher
Searcher@342ee91a[hdfsbackuprestore_shard2_replica_n3]
main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(7.2.0):C3)))}
[junit4] 2> 1247485 INFO (qtp1120325267-4893) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2
end_commit_flush
[junit4] 2> 1247485 INFO (qtp1120325267-4893) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.p.LogUpdateProcessorFactory
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:46340/solr/hdfsbackuprestore_shard2_replica_n3/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 1078
[junit4] 2> 1247965 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0
QTime=0
[junit4] 2> 1247966 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Master's
generation: 2
[junit4] 2> 1247966 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Master's version:
1508042241622
[junit4] 2> 1247966 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Slave's generation:
1
[junit4] 2> 1247967 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Slave's version: 0
[junit4] 2> 1247967 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Starting
replication process
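The IndexFetcher entries above show the two outcomes of the version/generation comparison a TLOG replica makes on each poll: a leader reporting version 0 means a brand-new, empty index (so the follower discards its own), while a newer generation or different version triggers a segment fetch. The following is a hypothetical paraphrase of just that decision, built only from the values visible in this log; it is not Solr's actual IndexFetcher code.

    public class FetchDecisionSketch {
        // Simplified reading of the two outcomes visible in this log, for illustration only.
        static String decide(long masterGeneration, long masterVersion,
                             long slaveGeneration, long slaveVersion) {
            if (masterVersion == 0L) {
                // First poll in the log: master generation 1, version 0 (empty index),
                // so the follower drops whatever it has locally.
                return "New index in Master. Deleting mine...";
            }
            if (masterGeneration != slaveGeneration || masterVersion != slaveVersion) {
                // Second poll: master at generation 2 / version 1508042241622 versus
                // the follower's generation 1 / version 0, so files get pulled.
                return "Starting replication process";
            }
            return "index already in sync";
        }

        public static void main(String[] args) {
            System.out.println(decide(1, 0L, 1, 0L));             // first poll in the log
            System.out.println(decide(2, 1508042241622L, 1, 0L)); // second poll in the log
        }
    }

After this decision the follower asks for the file list of generation 2 and streams each segment file with command=filecontent, which is what the long run of /replication requests below shows.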
[junit4] 2> 1247968 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&wt=javabin&version=2&command=filelist}
status=0 QTime=1
[junit4] 2> 1247969 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Number of files in
latest index in master: 18
[junit4] 2> 1247983 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
[junit4] 2> 1248002 INFO (indexFetcher-1652-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.h.IndexFetcher Starting download
(fullCopy=false) to
MMapDirectory@/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_t7/data/index.20171015053722910
lockFactory=org.apache.lucene.store.NativeFSLockFactory@62b57160
[junit4] 2> 1248003 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0.si&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248027 INFO (qtp1120325267-4890) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0_Direct_0.doc&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248101 INFO (qtp1120325267-4889) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0_Direct_0.tim&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248185 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0_Direct_0.tip&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248186 INFO (qtp1120325267-4893) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0.nvd&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248187 INFO (qtp1120325267-4887) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0.fdx&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248189 INFO (qtp1120325267-4890) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0.fdt&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248202 INFO (qtp1120325267-4890) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0.dii&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248203 INFO (qtp1120325267-4890) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0_MockRandom_0.pos&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248222 INFO (qtp1120325267-4890) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0_MockRandom_0.doc&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248238 INFO (qtp1120325267-4889) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication
params={generation=2&qt=/replication&file=_0_MockRandom_0.tim&checksum=true&wt=filestream&command=filecontent}
status=0 QTime=0
[junit4] 2> 1248239 INFO (qtp1120325267-4892) [n:127.0.0.1:46340_solr
c:hdfsbackuprestore s:shard2 r:core_node6
x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request
[hdfsbackuprestore_shard2_replica_n3] webapp=/solr path=/replication pa
[...truncated too long message...]
sbackuprestore_restored.shard2.replica_n41, service url = null, agent id =
null] for registry solr.core.hdfsbackuprestore_restored.shard2.replica_n41 /
com.codahale.metrics.MetricRegistry@2a9a0264
[junit4] 2> 1264716 INFO (coreCloseExecutor-1679-thread-3)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore_restored s:shard2 r:core_node46
x:hdfsbackuprestore_restored_shard2_replica_t45] o.a.s.m.SolrMetricManager
Closing metric reporters for
registry=solr.core.hdfsbackuprestore_restored.shard2.replica_t45, tag=412209052
[junit4] 2> 1264717 INFO (coreCloseExecutor-1679-thread-3)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore_restored s:shard2 r:core_node46
x:hdfsbackuprestore_restored_shard2_replica_t45] o.a.s.m.r.SolrJmxReporter
Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@42391743:
rootName = null, domain =
solr.core.hdfsbackuprestore_restored.shard2.replica_t45, service url = null,
agent id = null] for registry
solr.core.hdfsbackuprestore_restored.shard2.replica_t45 /
com.codahale.metrics.MetricRegistry@61ea58d1
[junit4] 2> 1264818 INFO (coreCloseExecutor-1678-thread-3)
[n:127.0.0.1:46340_solr c:hdfsbackuprestore_restored s:shard2 r:core_node42
x:hdfsbackuprestore_restored_shard2_replica_n41] o.a.s.m.SolrMetricManager
Closing metric reporters for
registry=solr.collection.hdfsbackuprestore_restored.shard2.leader, tag=76754075
[junit4] 2> 1264818 INFO (coreCloseExecutor-1678-thread-4)
[n:127.0.0.1:46340_solr c:hdfsbackuprestore_restored s:shard1 r:core_node44
x:hdfsbackuprestore_restored_shard1_replica_n43] o.a.s.m.SolrMetricManager
Closing metric reporters for
registry=solr.core.hdfsbackuprestore_restored.shard1.replica_n43, tag=1896917766
[junit4] 2> 1264818 INFO (coreCloseExecutor-1678-thread-4)
[n:127.0.0.1:46340_solr c:hdfsbackuprestore_restored s:shard1 r:core_node44
x:hdfsbackuprestore_restored_shard1_replica_n43] o.a.s.m.r.SolrJmxReporter
Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@3b9d862a:
rootName = null, domain =
solr.core.hdfsbackuprestore_restored.shard1.replica_n43, service url = null,
agent id = null] for registry
solr.core.hdfsbackuprestore_restored.shard1.replica_n43 /
com.codahale.metrics.MetricRegistry@390a8d22
[junit4] 2> 1264873 INFO (coreCloseExecutor-1679-thread-3)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore_restored s:shard2 r:core_node46
x:hdfsbackuprestore_restored_shard2_replica_t45] o.a.s.m.SolrMetricManager
Closing metric reporters for
registry=solr.collection.hdfsbackuprestore_restored.shard2.leader, tag=412209052
[junit4] 2> 1264914 INFO (coreCloseExecutor-1679-thread-4)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore_restored s:shard1 r:core_node48
x:hdfsbackuprestore_restored_shard1_replica_t47] o.a.s.m.SolrMetricManager
Closing metric reporters for
registry=solr.core.hdfsbackuprestore_restored.shard1.replica_t47, tag=1928131343
[junit4] 2> 1264914 INFO (coreCloseExecutor-1679-thread-4)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore_restored s:shard1 r:core_node48
x:hdfsbackuprestore_restored_shard1_replica_t47] o.a.s.m.r.SolrJmxReporter
Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@c4a2e41:
rootName = null, domain =
solr.core.hdfsbackuprestore_restored.shard1.replica_t47, service url = null,
agent id = null] for registry
solr.core.hdfsbackuprestore_restored.shard1.replica_t47 /
com.codahale.metrics.MetricRegistry@194b6f6a
[junit4] 2> 1264915 INFO (coreCloseExecutor-1678-thread-4)
[n:127.0.0.1:46340_solr c:hdfsbackuprestore_restored s:shard1 r:core_node44
x:hdfsbackuprestore_restored_shard1_replica_n43] o.a.s.m.SolrMetricManager
Closing metric reporters for
registry=solr.collection.hdfsbackuprestore_restored.shard1.leader,
tag=1896917766
[junit4] 2> 1264990 INFO (coreCloseExecutor-1679-thread-4)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore_restored s:shard1 r:core_node48
x:hdfsbackuprestore_restored_shard1_replica_t47] o.a.s.m.SolrMetricManager
Closing metric reporters for
registry=solr.collection.hdfsbackuprestore_restored.shard1.leader,
tag=1928131343
[junit4] 2> 1264991 INFO (coreCloseExecutor-1679-thread-1)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard1 r:core_node5
x:hdfsbackuprestore_shard1_replica_t2] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.collection.hdfsbackuprestore.shard1.leader,
tag=1281540121
[junit4] 2> 1264998 INFO (coreCloseExecutor-1679-thread-2)
[n:127.0.0.1:33650_solr c:hdfsbackuprestore s:shard2 r:core_node8
x:hdfsbackuprestore_shard2_replica_t7] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.collection.hdfsbackuprestore.shard2.leader,
tag=1524881456
[junit4] 2> 1265093 WARN
(zkCallback-584-thread-2-processing-n:127.0.0.1:46340_solr)
[n:127.0.0.1:46340_solr ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered,
but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes]
[junit4] 2> 1265094 INFO
(zkCallback-583-thread-3-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (2) -> (1)
[junit4] 2> 1265128 INFO (jetty-closer-573-thread-2) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@5144a051{/solr,null,UNAVAILABLE}
[junit4] 2> 1265145 INFO (jetty-closer-573-thread-1) [ ]
o.a.s.c.Overseer Overseer
(id=98831055757770757-127.0.0.1:33650_solr-n_0000000000) closing
[junit4] 2> 1265145 INFO
(OverseerStateUpdate-98831055757770757-127.0.0.1:33650_solr-n_0000000000)
[n:127.0.0.1:33650_solr ] o.a.s.c.Overseer Overseer Loop exiting :
127.0.0.1:33650_solr
[junit4] 2> 1265165 WARN
(OverseerAutoScalingTriggerThread-98831055757770757-127.0.0.1:33650_solr-n_0000000000)
[n:127.0.0.1:33650_solr ] o.a.s.c.a.OverseerTriggerThread
OverseerTriggerThread woken up but we are closed, exiting.
[junit4] 2> 1266666 WARN
(zkCallback-583-thread-3-processing-n:127.0.0.1:33650_solr)
[n:127.0.0.1:33650_solr ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered,
but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes]
[junit4] 2> 1266667 INFO (jetty-closer-573-thread-1) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@39a136dd{/solr,null,UNAVAILABLE}
[junit4] 2> 1266667 ERROR
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper
server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 1266668 INFO
(SUITE-TestHdfsCloudBackupRestore-seed#[1A52D2B00C0736A5]-worker) [ ]
o.a.s.c.ZkTestServer connecting to 127.0.0.1:38810 38810
[junit4] 2> 1266960 INFO (Thread-1751) [ ] o.a.s.c.ZkTestServer
connecting to 127.0.0.1:38810 38810
[junit4] 2> 1266960 WARN (Thread-1751) [ ] o.a.s.c.ZkTestServer Watch
limit violations:
[junit4] 2> Maximum concurrent create/delete watches above limit:
[junit4] 2>
[junit4] 2> 4 /solr/collections/hdfsbackuprestore_restored/state.json
[junit4] 2> 3 /solr/aliases.json
[junit4] 2> 3 /solr/clusterprops.json
[junit4] 2> 2 /solr/security.json
[junit4] 2> 2 /solr/configs/customConfigName
[junit4] 2> 2 /solr/configs/conf1
[junit4] 2>
[junit4] 2> Maximum concurrent data watches above limit:
[junit4] 2>
[junit4] 2> 38 /solr/collections/hdfsbackuprestore/state.json
[junit4] 2> 3 /solr/clusterstate.json
[junit4] 2>
[junit4] 2> Maximum concurrent children watches above limit:
[junit4] 2>
[junit4] 2> 3 /solr/live_nodes
[junit4] 2> 3 /solr/collections
[junit4] 2>
[junit4] 2> NOTE: leaving temporary files on disk at:
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.TestHdfsCloudBackupRestore_1A52D2B00C0736A5-001
[junit4] 2> Oct 15, 2017 4:37:41 AM
com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
[junit4] 2> WARNING: Will linger awaiting termination of 3 leaked
thread(s).
[junit4] 2> NOTE: test params are: codec=Asserting(Lucene70):
{shard_s=PostingsFormat(name=MockRandom), id=PostingsFormat(name=Direct)},
docValues:{}, maxPointsInLeafNode=1664, maxMBSortInHeap=7.910597613868237,
sim=RandomSimilarity(queryNorm=false): {}, locale=hr,
timezone=Africa/Brazzaville
[junit4] 2> NOTE: Linux 4.4.0-83-generic amd64/Oracle Corporation
1.8.0_144 (64-bit)/cpus=4,threads=2,free=114562240,total=514850816
[junit4] 2> NOTE: All tests run in this JVM: [ZkNodePropsTest,
CollectionTooManyReplicasTest, TestCollapseQParserPlugin,
TestUniqueKeyFieldResource, ZkStateReaderTest,
ConcurrentDeleteAndCreateCollectionTest, SparseHLLTest, SolrIndexMetricsTest,
IndexBasedSpellCheckerTest, TestExpandComponent,
ClassificationUpdateProcessorIntegrationTest, TestFieldCacheSortRandom,
TestSolrXml, TestSubQueryTransformerCrossCore, TestSchemaManager,
TestFieldCollectionResource, BigEndianAscendingWordSerializerTest,
TestMiniSolrCloudClusterSSL, DistributedSuggestComponentTest, NodeMutatorTest,
DocExpirationUpdateProcessorFactoryTest, SSLMigrationTest, MetricsConfigTest,
TestRealTimeGet, DeleteLastCustomShardedReplicaTest, HdfsRecoveryZkTest,
TestFieldTypeResource, TestRangeQuery, HdfsThreadLeakTest, RankQueryTest,
SolrCoreTest, TestEmbeddedSolrServerSchemaAPI, SolrMetricReporterTest,
TestFaceting, TestCSVResponseWriter, FullHLLTest,
HdfsCollectionsAPIDistributedZkTest, TestManagedSchemaAPI,
DistributedQueryElevationComponentTest, LeaderFailureAfterFreshStartTest,
CreateCollectionCleanupTest, TestLegacyNumericRangeQueryBuilder,
LeaderElectionIntegrationTest, TestJsonRequest, TestRandomRequestDistribution,
TestClassicSimilarityFactory, TestConfigOverlay, TestRTimerTree,
MoveReplicaHDFSTest, TestPhraseSuggestions, TestSQLHandlerNonCloud,
TestHdfsCloudBackupRestore]
[junit4] Completed [123/744 (1!)] on J0 in 50.45s, 1 test, 1 failure <<<
FAILURES!
[...truncated 49026 lines...]