Copilot commented on code in PR #3904: URL: https://github.com/apache/solr/pull/3904#discussion_r2658945873
########## solr/packaging/test/test_create_collection_with_remnants.bats: ########## @@ -0,0 +1,92 @@ +#!/usr/bin/env bats + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Tests what happens when remnant core directories and and files still are on disk. +# Flip solr.delete.unknown.cores=false to see the out of box behavior and the failures. Review Comment: The BATS test comment on line 19 references "solr.delete.unknown.cores" but should reference "solr.cloud.delete.unknown.cores.enabled" to match the actual property name being used in this PR. ```suggestion # Flip solr.cloud.delete.unknown.cores.enabled=false to see the out of box behavior and the failures. ``` ########## solr/core/src/java/org/apache/solr/core/CoreContainer.java: ########## @@ -1511,6 +1511,16 @@ public SolrCore create( preExistingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd); } + final boolean deleteUnknownCores = + Boolean.parseBoolean( + System.getProperty("solr.cloud.delete.unknown.cores.enabled", "false")); + if (deleteUnknownCores && Files.exists(cd.getInstanceDir())) { + log.warn( + "There appears to be an existing directory for core {}, now deleting it", Review Comment: The warning message on line 1518-1520 could be more informative for production debugging. Currently it only logs the core name, but doesn't indicate that this is an automatic deletion triggered by the "solr.cloud.delete.unknown.cores.enabled" property, or provide the full path being deleted. Consider including the property name in the warning message and the full path to help administrators understand what's happening and why. For example: "Automatically deleting existing directory at [path] for core [name] because solr.cloud.delete.unknown.cores.enabled is true". ```suggestion "Automatically deleting existing directory at [{}] for core [{}] because solr.cloud.delete.unknown.cores.enabled is true", cd.getInstanceDir().toAbsolutePath(), ``` ########## solr/core/src/test/org/apache/solr/cloud/DeleteCoreRemnantsOnCreateTest.java: ########## @@ -0,0 +1,327 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.cloud; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Optional; +import org.apache.solr.client.solrj.request.CollectionAdminRequest; +import org.apache.solr.client.solrj.request.CoreAdminRequest; +import org.apache.solr.client.solrj.response.json.JsonMapResponseParser; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.Replica; +import org.apache.solr.common.cloud.Slice; +import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.SolrCore; +import org.apache.solr.embedded.JettySolrRunner; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Solr occasionally gets into an inconsistent state with its cores lifecycle where remnant files + * are left on disk after various operations that delete a core. Examples include deleting a + * collection operation that doesn't properly finish, or maybe the Solr process unexpectedly gets + * killed. The system property "solr.cloud.delete.unknown.cores.enabled" is an expert setting that + * when enabled automatically deletes any remnant core data on disk when new cores are created that + * would otherwise fail due to the preexisting files. You should be cautious in enabling this + * feature, as it means that something isn't working well in your Solr setup. + */ +public class DeleteCoreRemnantsOnCreateTest extends SolrCloudTestCase { + private static final String DELETE_UNKNOWN_CORES_PROP = "solr.cloud.delete.unknown.cores.enabled"; + + @BeforeClass + public static void setupCluster() throws Exception { + configureCluster(1).addConfig("conf", configset("cloud-minimal")).configure(); + } + + @Before + public void resetProperty() { + System.clearProperty(DELETE_UNKNOWN_CORES_PROP); + } + + /** + * Shared setup for testing collection creation with remnants. Creates a collection, deletes it, + * and then leaves behind a remnant directory. 
+ */ + private void setupCollectionRemnant(String collectionName) throws Exception { + List<JettySolrRunner> jettys = cluster.getJettySolrRunners(); + String primaryNode = jettys.getFirst().getNodeName(); + + CollectionAdminRequest.Create createRequest = + CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1); + createRequest.setCreateNodeSet(primaryNode); + createRequest.process(cluster.getSolrClient()); + + waitForState( + "Expected collection to be fully active", + collectionName, + (n, c) -> SolrCloudTestCase.replicasForCollectionAreFullyActive(n, c, 1, 1)); + + Replica primaryReplica = getReplicaOnNode(collectionName, "shard1", primaryNode); + JettySolrRunner primaryJetty = cluster.getReplicaJetty(primaryReplica); + String originalCoreName = primaryReplica.getCoreName(); + Path remnantInstanceDir; + try (SolrCore core = primaryJetty.getCoreContainer().getCore(originalCoreName)) { + CoreDescriptor cd = core.getCoreDescriptor(); + remnantInstanceDir = cd.getInstanceDir(); + } + + CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient()); + waitForState("Expected collection deletion", collectionName, (n, c) -> c == null); + + // Simulate a core remnant still exists by creating the directory and core.properties + Files.createDirectories(remnantInstanceDir); + Files.writeString(remnantInstanceDir.resolve("core.properties"), "", StandardCharsets.UTF_8); + } + + /** + * Shared setup for testing replica addition with remnants. Creates a collection, then simulates a + * remnant directory on the single node that will impact the next addReplica command. + */ + private void setupReplicaRemnant(String collectionName) throws Exception { + List<JettySolrRunner> jettys = cluster.getJettySolrRunners(); + String primaryNode = jettys.getFirst().getNodeName(); + + CollectionAdminRequest.Create createRequest = + CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1); + createRequest.setCreateNodeSet(primaryNode); + createRequest.process(cluster.getSolrClient()); + + waitForState( + "Expected collection to be fully active", + collectionName, + (n, c) -> SolrCloudTestCase.replicasForCollectionAreFullyActive(n, c, 1, 1)); + + int nextReplicaIndex = 3; // Yep, from 1 to 3 due to how we count in ZK and setup. + String expectedNewReplicaName = collectionName + "_shard1_replica_n" + nextReplicaIndex; + + // Simulate a core remnant on the single node adjacent to the existing replica instance path + Replica existing = getReplicaOnNode(collectionName, "shard1", primaryNode); Review Comment: The error message assertion on line 116 is fragile. It checks for the exact string "Underlying core creation failed", which could change in future Solr versions or may not be the exact message in all failure scenarios. Consider checking for a more general pattern or multiple possible error indicators (e.g., checking if the message contains "core" AND ("creation" OR "failed")) to make the test more robust to message changes. 
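For illustration, a looser assertion along the lines this comment suggests might look like the sketch below. This is only a sketch: the helper name is hypothetical, not code from the PR, and it assumes it lives inside the test class where JUnit's `assertTrue` is inherited from the test base class.

```java
// Hypothetical helper for the test class: tolerate wording changes in the
// failure message instead of matching one exact string like
// "Underlying core creation failed".
private static void assertLooksLikeCoreCreationFailure(Exception e) {
  // Guard against a null message before lower-casing it for the checks below.
  String msg = e.getMessage() == null ? "" : e.getMessage().toLowerCase(java.util.Locale.ROOT);
  assertTrue(
      "Expected a core-creation related failure, but the message was: " + msg,
      msg.contains("core") && (msg.contains("creation") || msg.contains("failed")));
}
```

The test's catch block could then call `assertLooksLikeCoreCreationFailure(e)` rather than asserting on the exact message text.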
########## solr/core/src/java/org/apache/solr/core/CoreContainer.java: ########## @@ -1511,6 +1511,16 @@ public SolrCore create( preExistingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd); } + final boolean deleteUnknownCores = + Boolean.parseBoolean( + System.getProperty("solr.cloud.delete.unknown.cores.enabled", "false")); + if (deleteUnknownCores && Files.exists(cd.getInstanceDir())) { + log.warn( + "There appears to be an existing directory for core {}, now deleting it", + cd.getName()); + SolrCore.deleteUnloadedCore(cd, true, true); Review Comment: The deletion operation on line 1521 (SolrCore.deleteUnloadedCore) could fail due to I/O errors, permission issues, or if files are locked. However, this failure is not explicitly caught and handled. If the deletion fails, the code continues to line 1526 (coresLocator.create), which will likely also fail since the directory still exists. Consider wrapping the deletion in a try-catch block to log specific deletion failures and provide a clearer error message to the user about why core creation failed, rather than letting it bubble up as a generic core creation failure. ```suggestion try { SolrCore.deleteUnloadedCore(cd, true, true); } catch (Exception e) { String msg = "Failed to delete existing instance directory for core '" + cd.getName() + "' at " + cd.getInstanceDir() + " before creating the core."; log.error(msg, e); throw new SolrException(ErrorCode.SERVER_ERROR, msg, e); } ``` ########## solr/packaging/test/test_create_collection_with_remnants.bats: ########## @@ -0,0 +1,92 @@ +#!/usr/bin/env bats + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Tests what happens when remnant core directories and and files still are on disk. Review Comment: Grammatical error: "directories and and files" should be "directories and files" (remove duplicate "and"). ```suggestion # Tests what happens when remnant core directories and files still are on disk. ``` ########## solr/core/src/java/org/apache/solr/core/CoreContainer.java: ########## @@ -1511,6 +1511,16 @@ public SolrCore create( preExistingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd); } + final boolean deleteUnknownCores = + Boolean.parseBoolean( + System.getProperty("solr.cloud.delete.unknown.cores.enabled", "false")); + if (deleteUnknownCores && Files.exists(cd.getInstanceDir())) { + log.warn( + "There appears to be an existing directory for core {}, now deleting it", + cd.getName()); + SolrCore.deleteUnloadedCore(cd, true, true); + } Review Comment: The deletion logic on lines 1517-1522 deletes the remnant directory immediately before calling coresLocator.create() on line 1526. 
However, there's a potential race condition: if another process or thread creates files in the instance directory between the deletion (line 1521) and the coresLocator.create() call (line 1526), this could lead to unexpected behavior. Consider checking if the directory exists again right before calling coresLocator.create() or handling potential FileAlreadyExistsException from coresLocator.create(). ########## solr/core/src/java/org/apache/solr/core/CoreContainer.java: ########## @@ -1511,6 +1511,16 @@ public SolrCore create( preExistingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd); } + final boolean deleteUnknownCores = + Boolean.parseBoolean( + System.getProperty("solr.cloud.delete.unknown.cores.enabled", "false")); + if (deleteUnknownCores && Files.exists(cd.getInstanceDir())) { + log.warn( + "There appears to be an existing directory for core {}, now deleting it", + cd.getName()); + SolrCore.deleteUnloadedCore(cd, true, true); + } Review Comment: The new deletion logic deletes directories without validating whether the existing directory actually contains remnant core files or is a valid/active core directory. The code only checks if the directory exists (Files.exists(cd.getInstanceDir())), but doesn't verify that it's truly a remnant (e.g., checking for the absence of a valid core descriptor in ZooKeeper or validating the directory is not in use). This could potentially delete legitimate core directories if there's a configuration issue or timing problem. Consider adding validation to confirm the directory is actually a remnant before deletion, such as checking if the core is registered in ZooKeeper or if it's currently loaded in the CoreContainer. ########## solr/packaging/test/test_create_collection_with_remnants.bats: ########## @@ -0,0 +1,92 @@ +#!/usr/bin/env bats + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Tests what happens when remnant core directories and and files still are on disk. +# Flip solr.delete.unknown.cores=false to see the out of box behavior and the failures. + +load bats_helper + +setup_file() { + common_clean_setup + solr start -Dsolr.delete.unknown.cores=true Review Comment: The BATS test uses an incorrect property name "solr.delete.unknown.cores" on line 25, but should use the new property name "solr.cloud.delete.unknown.cores.enabled" to be consistent with the property rename in this PR. While the PR description mentions this test will be deleted before merging, it should still use the correct property name while it exists in the codebase. ```suggestion # Flip solr.cloud.delete.unknown.cores.enabled=false to see the out of box behavior and the failures. 
load bats_helper setup_file() { common_clean_setup solr start -Dsolr.cloud.delete.unknown.cores.enabled=true ``` ########## solr/core/src/java/org/apache/solr/core/CoreContainer.java: ########## @@ -1511,6 +1511,16 @@ public SolrCore create( preExistingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd); } + final boolean deleteUnknownCores = + Boolean.parseBoolean( + System.getProperty("solr.cloud.delete.unknown.cores.enabled", "false")); Review Comment: The property retrieval pattern on lines 1514-1516 is duplicated at line 1681-1683 (with the same property name). Consider extracting this into a private helper method or a constant field to avoid duplication and ensure consistency. For example, create a method like `private boolean shouldDeleteUnknownCores()` that can be reused in both locations. ########## solr/solrj/src/resources/DeprecatedSystemPropertyMappings.properties: ########## @@ -18,7 +19,8 @@ solr.auth.superuser=solr.authorization.superuser solr.cloud.shardsplit.checkdiskspace.enabled=solr.shard.split.check.disk.space.enabled solr.cloud.prep.recovery.read.timeout.additional.ms=prep.recovery.read.timeout.extra.wait solr.cloud.prs.enabled=solr.prs.default -solr.cloud.startup.delete.unknown.cores.enabled=solr.delete.unknown.cores +solr.cloud.delete.unknown.cores.enabled=solr.delete.unknown.cores +#solr.cloud.delete.unknown.cores.enabled=solr.cloud.startup.delete.unknown.cores.enabled Review Comment: The deprecated property mapping appears to be inverted. Based on the file header comment (line 4), when renaming a property, the format should be "new.property.name=old.property.name". Currently, line 22 maps the new name to an even older legacy name (solr.delete.unknown.cores), while the actual old property name (solr.cloud.startup.delete.unknown.cores.enabled) is commented out on line 23. The commented line 23 should be active, and line 22 should be removed or adjusted. Otherwise, users with the old property name "solr.cloud.startup.delete.unknown.cores.enabled" won't have it properly mapped to the new name. ```suggestion solr.cloud.delete.unknown.cores.enabled=solr.cloud.startup.delete.unknown.cores.enabled ``` ########## solr/core/src/test/org/apache/solr/cloud/DeleteCoreRemnantsOnCreateTest.java: ########## @@ -0,0 +1,327 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.solr.cloud; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Optional; +import org.apache.solr.client.solrj.request.CollectionAdminRequest; +import org.apache.solr.client.solrj.request.CoreAdminRequest; +import org.apache.solr.client.solrj.response.json.JsonMapResponseParser; +import org.apache.solr.common.cloud.DocCollection; +import org.apache.solr.common.cloud.Replica; +import org.apache.solr.common.cloud.Slice; +import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.SolrCore; +import org.apache.solr.embedded.JettySolrRunner; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Solr occasionally gets into an inconsistent state with its cores lifecycle where remnant files + * are left on disk after various operations that delete a core. Examples include deleting a + * collection operation that doesn't properly finish, or maybe the Solr process unexpectedly gets + * killed. The system property "solr.cloud.delete.unknown.cores.enabled" is an expert setting that + * when enabled automatically deletes any remnant core data on disk when new cores are created that + * would otherwise fail due to the preexisting files. You should be cautious in enabling this + * feature, as it means that something isn't working well in your Solr setup. + */ +public class DeleteCoreRemnantsOnCreateTest extends SolrCloudTestCase { + private static final String DELETE_UNKNOWN_CORES_PROP = "solr.cloud.delete.unknown.cores.enabled"; + + @BeforeClass + public static void setupCluster() throws Exception { + configureCluster(1).addConfig("conf", configset("cloud-minimal")).configure(); + } + + @Before + public void resetProperty() { + System.clearProperty(DELETE_UNKNOWN_CORES_PROP); + } + + /** + * Shared setup for testing collection creation with remnants. Creates a collection, deletes it, + * and then leaves behind a remnant directory. + */ + private void setupCollectionRemnant(String collectionName) throws Exception { + List<JettySolrRunner> jettys = cluster.getJettySolrRunners(); + String primaryNode = jettys.getFirst().getNodeName(); + + CollectionAdminRequest.Create createRequest = + CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1); + createRequest.setCreateNodeSet(primaryNode); + createRequest.process(cluster.getSolrClient()); + + waitForState( + "Expected collection to be fully active", + collectionName, + (n, c) -> SolrCloudTestCase.replicasForCollectionAreFullyActive(n, c, 1, 1)); + + Replica primaryReplica = getReplicaOnNode(collectionName, "shard1", primaryNode); + JettySolrRunner primaryJetty = cluster.getReplicaJetty(primaryReplica); + String originalCoreName = primaryReplica.getCoreName(); + Path remnantInstanceDir; + try (SolrCore core = primaryJetty.getCoreContainer().getCore(originalCoreName)) { + CoreDescriptor cd = core.getCoreDescriptor(); + remnantInstanceDir = cd.getInstanceDir(); + } + + CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient()); + waitForState("Expected collection deletion", collectionName, (n, c) -> c == null); + + // Simulate a core remnant still exists by creating the directory and core.properties + Files.createDirectories(remnantInstanceDir); + Files.writeString(remnantInstanceDir.resolve("core.properties"), "", StandardCharsets.UTF_8); + } + + /** + * Shared setup for testing replica addition with remnants. 
Creates a collection, then simulates a + * remnant directory on the single node that will impact the next addReplica command. + */ + private void setupReplicaRemnant(String collectionName) throws Exception { + List<JettySolrRunner> jettys = cluster.getJettySolrRunners(); + String primaryNode = jettys.getFirst().getNodeName(); + + CollectionAdminRequest.Create createRequest = + CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1); + createRequest.setCreateNodeSet(primaryNode); + createRequest.process(cluster.getSolrClient()); + + waitForState( + "Expected collection to be fully active", + collectionName, + (n, c) -> SolrCloudTestCase.replicasForCollectionAreFullyActive(n, c, 1, 1)); + + int nextReplicaIndex = 3; // Yep, from 1 to 3 due to how we count in ZK and setup. + String expectedNewReplicaName = collectionName + "_shard1_replica_n" + nextReplicaIndex; + + // Simulate a core remnant on the single node adjacent to the existing replica instance path + Replica existing = getReplicaOnNode(collectionName, "shard1", primaryNode); + try (SolrCore core = + cluster.getReplicaJetty(existing).getCoreContainer().getCore(existing.getCoreName())) { + Path siblingDir = core.getInstancePath().getParent().resolve(expectedNewReplicaName); + Files.createDirectories(siblingDir); + Files.writeString( + siblingDir.resolve("core.properties"), + "name=" + + expectedNewReplicaName + + "_remnant\n" + + "collection=" + + collectionName + + "_remnant\n" + + "shard=shard1\n" + + "coreNodeName=core_node_remnant\n", + StandardCharsets.UTF_8); + } + } + + /** + * Shared setup for testing DeleteCore admin API with remnants. Creates a collection, deletes it, + * and then leaves behind a remnant core directory. + */ + private String setupCoreRemnantForUnloadCoreOperation(String collectionName) throws Exception { + List<JettySolrRunner> jettys = cluster.getJettySolrRunners(); + String primaryNode = jettys.getFirst().getNodeName(); + + CollectionAdminRequest.Create createRequest = + CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1); + createRequest.setCreateNodeSet(primaryNode); + createRequest.process(cluster.getSolrClient()); + + waitForState( + "Expected collection to be fully active", + collectionName, + (n, c) -> SolrCloudTestCase.replicasForCollectionAreFullyActive(n, c, 1, 1)); + + Replica primaryReplica = getReplicaOnNode(collectionName, "shard1", primaryNode); + JettySolrRunner primaryJetty = cluster.getReplicaJetty(primaryReplica); + String originalCoreName = primaryReplica.getCoreName(); + Path remnantInstanceDir; + try (SolrCore core = primaryJetty.getCoreContainer().getCore(originalCoreName)) { + CoreDescriptor cd = core.getCoreDescriptor(); + remnantInstanceDir = cd.getInstanceDir(); + } + + CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient()); + waitForState("Expected collection deletion", collectionName, (n, c) -> c == null); + + // Simulate a core remnant still exists by creating the directory and core.properties + Files.createDirectories(remnantInstanceDir); + Files.writeString( + remnantInstanceDir.resolve("core.properties"), + "name=" + originalCoreName + "\n", + StandardCharsets.UTF_8); + + return originalCoreName; + } + + @Test + public void testCreateCollectionWithRemnantsFailsWithoutSetting() throws Exception { + assertNull( + "Property should not be set by default", System.getProperty(DELETE_UNKNOWN_CORES_PROP)); + + String collectionName = "coreRemnantCreateNoSetting"; + setupCollectionRemnant(collectionName); + + // Try to 
create the collection again - this demonstrates the behavior without the setting + // In typical environments, this might fail, but behavior depends on configuration + CollectionAdminRequest.Create recreateRequest = + CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1); + List<JettySolrRunner> jettys = cluster.getJettySolrRunners(); + recreateRequest.setCreateNodeSet(jettys.getFirst().getNodeName()); + + // The request to create a collection SHOULD fail based on the remnant file, if it does not it + // means we've changed Solr's behavior when creating a core and + // remnants exist, and therefore we should rethink the utility of this setting. + try { + recreateRequest.process(cluster.getSolrClient()); + fail("This request to recreate the collection should have failed due to remnant files."); + } catch (Exception e) { + assertTrue( + "Verify the exception was due to core creation failed.", + e.getMessage().contains("Underlying core creation failed")); + } + } + + @Test + public void testCreateCollectionWithRemnantsWithSetting() throws Exception { + System.setProperty(DELETE_UNKNOWN_CORES_PROP, "true"); + + String collectionName = "coreRemnantCreateWithSetting"; + setupCollectionRemnant(collectionName); + + // With the setting enabled, collection creation should succeed despite remnants + CollectionAdminRequest.Create recreateRequest = + CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1); + List<JettySolrRunner> jettys = cluster.getJettySolrRunners(); + recreateRequest.setCreateNodeSet(jettys.getFirst().getNodeName()); + recreateRequest.process(cluster.getSolrClient()); Review Comment: The error message assertion on line 215 is fragile. It checks for the exact string "ADDREPLICA failed to create replica", which could change in future Solr versions or may not be the exact message in all failure scenarios. Consider checking for a more general pattern or multiple possible error indicators to make the test more robust to message changes. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
