BryanMLima commented on code in PR #8875:
URL: https://github.com/apache/cloudstack/pull/8875#discussion_r1555813585
##########
engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java:
##########
@@ -72,8 +79,14 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
     protected GenericSearchBuilder<VolumeVO, SumCount> primaryStorageSearch2;
     protected GenericSearchBuilder<VolumeVO, SumCount> secondaryStorageSearch;
     private final SearchBuilder<VolumeVO> poolAndPathSearch;
+    protected SearchBuilder<VolumeVO> volumePoolNotInClusterSearch;
+
     @Inject
     ResourceTagDao _tagsDao;
+    @Inject
+    HostDao _hostDao;
+    @Inject
+    VMInstanceDao _vmDao;
Review Comment:
I would encourage not using the old pattern of prefixing private class variables with `_`, as it is not part of the Java naming conventions. TBH, we should probably update the CloudStack [coding conventions](https://cwiki.apache.org/confluence/display/CLOUDSTACK/Coding+conventions) to reflect this as well.
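For example, the newly added injections could follow plain camelCase (just a sketch of the suggested style; renaming the pre-existing `_tagsDao` could be left for a follow-up):
```java
@Inject
HostDao hostDao;
@Inject
VMInstanceDao vmDao;
```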
##########
test/integration/smoke/test_primary_storage.py:
##########
@@ -711,4 +854,4 @@ def test_03_migration_options_storage_tags(self):
         self.debug("Suitable storage pools found: %s" % len(pools_suitable))
         self.assertEqual(0, len(pools_suitable), "Check that there is no migration option for volume")
-        return
+        return
Review Comment:
The pre-commit lint is failing because the newline at the end of the file was removed.
##########
server/src/main/java/com/cloud/storage/StorageManagerImpl.java:
##########
@@ -1148,6 +1151,95 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I
         return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
     }
+    @Override
+    public boolean changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException {
Review Comment:
Same here for this method: it only returns true or throws an exception.
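If nothing upstream actually consumes the boolean, the signature could simply become void (sketch only):
```java
@Override
public void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) {
    // same validations and scope change as before, just without the "return true"
}
```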
##########
server/src/main/java/com/cloud/storage/StorageManagerImpl.java:
##########
@@ -1148,6 +1151,95 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I
         return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
     }
+    @Override
+    public boolean changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException {
+        Long id = cmd.getId();
+
+        Long accountId = cmd.getEntityOwnerId();
+        if (!_accountMgr.isRootAdmin(accountId)) {
+            throw new PermissionDeniedException("Only root admin can perform this operation");
+        }
+
+        ScopeType newScope = EnumUtils.getEnumIgnoreCase(ScopeType.class, cmd.getScope());
+        if (newScope != ScopeType.ZONE && newScope != ScopeType.CLUSTER) {
+            throw new InvalidParameterValueException("Invalid scope " + newScope.toString() + "for Primary storage");
+        }
+
+        StoragePoolVO primaryStorage = _storagePoolDao.findById(id);
+        if (primaryStorage == null) {
+            throw new IllegalArgumentException("Unable to find storage pool with ID: " + id);
+        }
+
+        if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) {
+            throw new InvalidParameterValueException("Scope of the Primary storage with id "
+                    + primaryStorage.getUuid() +
+                    " cannot be changed, as it is not in the Disabled state");
+        }
+
+        ScopeType currentScope = primaryStorage.getScope();
+        if (currentScope.equals(newScope)) {
+            throw new InvalidParameterValueException("New scope must be different than the current scope");
+        }
+
+        HypervisorType hypervisorType;
+        if (currentScope.equals(ScopeType.CLUSTER)) {
+            /*
+             * For cluster wide primary storage the hypervisor type might not be set.
+             * So, get it from the clusterVO.
+             */
+            Long clusterId = primaryStorage.getClusterId();
+            ClusterVO clusterVO = _clusterDao.findById(clusterId);
+            hypervisorType = clusterVO.getHypervisorType();
+        } else {
+            hypervisorType = primaryStorage.getHypervisor();
+        }
+        if (!zoneWidePoolSupportedHypervisorTypes.contains(hypervisorType)) {
+            throw new InvalidParameterValueException("Primary storage scope change is not supported for hypervisor type " + hypervisorType);
+        }
+
+        String providerName = primaryStorage.getStorageProviderName();
+        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
+        PrimaryDataStoreLifeCycle lifeCycle = (PrimaryDataStoreLifeCycle) storeProvider.getDataStoreLifeCycle();
+        DataStore primaryStore = _dataStoreMgr.getPrimaryDataStore(id);
+
+        Long zoneId = primaryStorage.getDataCenterId();
+        DataCenterVO zone = _dcDao.findById(zoneId);
+        if (zone == null) {
+            throw new InvalidParameterValueException("Unable to find zone by id " + zoneId);
+        }
+        if (Grouping.AllocationState.Disabled == zone.getAllocationState()) {
+            throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId);
+        }
+
+        if (newScope.equals(ScopeType.ZONE)) {
+            ClusterScope clusterScope = new ClusterScope(primaryStorage.getClusterId(), null, zoneId);
+            lifeCycle.changeStoragePoolScopeToZone(primaryStore, clusterScope, hypervisorType);
+
+        } else {
+            Long clusterId = cmd.getClusterId();
+            if (clusterId == null) {
+                throw new IllegalArgumentException("Unable to find cluster with ID: " + clusterId);
+            }
+            ClusterVO clusterVO = _clusterDao.findById(clusterId);
+            if (clusterVO == null) {
+                throw new InvalidParameterValueException("Unable to find cluster by id " + clusterId);
+            }
+            if (Grouping.AllocationState.Disabled == clusterVO.getAllocationState()) {
+                throw new PermissionDeniedException("Cannot perform this operation, Cluster is currently disabled: " + zoneId);
+            }
+            List<VirtualMachine.State> states = Arrays.asList(State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring);
+            List<VolumeVO> volumesInOtherClusters = volumeDao.listByPoolIdVMStatesNotInCluster(clusterId, states, id);
+            if (volumesInOtherClusters.size() > 0) {
+                throw new CloudRuntimeException("Cannot change scope of the pool " + primaryStorage.getName() + " to cluster " + clusterVO.getName() + " as there are associated volumes present for other clusters");
+            }
+
+            ClusterScope clusterScope = new ClusterScope(clusterId, clusterVO.getPodId(), zoneId);
+            lifeCycle.changeStoragePoolScopeToCluster(primaryStore, clusterScope, hypervisorType);
+        }
+
+        return true;
+    }
Review Comment:
I know this method is not that complex or long, but you could break it into smaller ones, making the code more readable, and add some unit tests as well. What do you think?
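For instance, the up-front checks could be pulled into a helper, which would also make them easy to cover with unit tests (just a sketch, assuming the usual JUnit 4 + Mockito setup in `StorageManagerImplTest`; the helper name, `accountManager` mock and `storageManagerImpl` field below are illustrative only):
```java
// Illustrative only: extract the validations into their own method...
private StoragePoolVO getAndValidatePoolForScopeChange(Long poolId, ScopeType newScope, Long accountId) {
    if (!_accountMgr.isRootAdmin(accountId)) {
        throw new PermissionDeniedException("Only root admin can perform this operation");
    }
    StoragePoolVO primaryStorage = _storagePoolDao.findById(poolId);
    if (primaryStorage == null) {
        throw new IllegalArgumentException("Unable to find storage pool with ID: " + poolId);
    }
    if (!StoragePoolStatus.Disabled.equals(primaryStorage.getStatus())) {
        throw new InvalidParameterValueException("Primary storage should be disabled for this operation");
    }
    if (newScope.equals(primaryStorage.getScope())) {
        throw new InvalidParameterValueException("New scope must be different than the current scope");
    }
    return primaryStorage;
}

// ...so each rule can be tested in isolation through the public API, e.g.:
@Test(expected = PermissionDeniedException.class)
public void changeStoragePoolScopeShouldFailForNonRootAdmin() {
    ChangeStoragePoolScopeCmd cmd = Mockito.mock(ChangeStoragePoolScopeCmd.class);
    Mockito.when(cmd.getEntityOwnerId()).thenReturn(2L);
    Mockito.when(accountManager.isRootAdmin(2L)).thenReturn(false);
    storageManagerImpl.changeStoragePoolScope(cmd);
}
```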
##########
server/src/main/java/com/cloud/storage/StorageManagerImpl.java:
##########
@@ -1148,6 +1150,91 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I
         return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
     }
+    @Override
+    public boolean changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException {
+        Long id = cmd.getId();
+
+        Long accountId = cmd.getEntityOwnerId();
+        if (!_accountMgr.isRootAdmin(accountId)) {
+            throw new PermissionDeniedException("Only root admin can perform this operation");
+        }
+
+        ScopeType scopeType = ScopeType.validateAndGetScopeType(cmd.getScope());
+        if (scopeType != ScopeType.ZONE && scopeType != ScopeType.CLUSTER) {
+            throw new InvalidParameterValueException("Invalid scope " + scopeType.toString() + "for Primary storage");
+        }
+
+        StoragePoolVO primaryStorage = _storagePoolDao.findById(id);
+        if (primaryStorage == null) {
+            throw new IllegalArgumentException("Unable to find storage pool with ID: " + id);
+        }
+
+        if (!primaryStorage.getStorageProviderName().equals(DataStoreProvider.DEFAULT_PRIMARY)) {
+            throw new InvalidParameterValueException("Primary storage scope change is only supported with "
+                    + DataStoreProvider.DEFAULT_PRIMARY.toString() + " data store provider");
+        }
+
+        if (!primaryStorage.getPoolType().equals(Storage.StoragePoolType.NetworkFilesystem) &&
+                !primaryStorage.getPoolType().equals(Storage.StoragePoolType.RBD)) {
+            throw new InvalidParameterValueException("Primary storage scope change is not supported for protocol "
+                    + primaryStorage.getPoolType().toString());
+        }
+
+        HypervisorType hypervisorType = primaryStorage.getHypervisor();
+        Set<HypervisorType> supportedHypervisorTypes = Sets.newHashSet(HypervisorType.KVM, HypervisorType.VMware, HypervisorType.Simulator);
+        if (!supportedHypervisorTypes.contains(hypervisorType)) {
+            throw new InvalidParameterValueException("Primary storage scope change is not supported for hypervisor type " + hypervisorType);
+        }
+
+        if (StoragePoolStatus.Disabled != primaryStorage.getStatus()) {
+            throw new InvalidParameterValueException("Primary storage should be disabled for this operation");
+        }
+
+        if (scopeType == primaryStorage.getScope()) {
+            throw new InvalidParameterValueException("Invalid scope change");
+        }
+
+        if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) {
+            throw new InvalidParameterValueException("Scope of the Primary storage with id "
+                    + primaryStorage.getUuid() +
+                    " cannot be changed, as it is not in the Disabled state");
+        }
+
+        String providerName = primaryStorage.getStorageProviderName();
+        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
+        PrimaryDataStoreLifeCycle lifeCycle = (PrimaryDataStoreLifeCycle) storeProvider.getDataStoreLifeCycle();
+        DataStore primaryStore = _dataStoreMgr.getPrimaryDataStore(id);
+
+        Long zoneId = primaryStorage.getDataCenterId();
+        DataCenterVO zone = _dcDao.findById(zoneId);
+        if (zone == null) {
+            throw new InvalidParameterValueException("unable to find zone by id " + zoneId);
+        }
+        if (Grouping.AllocationState.Disabled == zone.getAllocationState()) {
Review Comment:
It is still using `==`.
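For reference, the `equals`-based form (constant first, so it stays null-safe) would be something like:
```java
if (Grouping.AllocationState.Disabled.equals(zone.getAllocationState())) {
    // ... same handling as before ...
}
```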
##########
engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AbstractPrimaryDataStoreLifeCycleImpl.java:
##########
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.Pair;
+
+public class AbstractPrimaryDataStoreLifeCycleImpl {
+    private static final Logger s_logger = Logger.getLogger(AbstractPrimaryDataStoreLifeCycleImpl.class);
+    @Inject
+    AgentManager agentMgr;
+    @Inject
+    protected ResourceManager _resourceMgr;
+    @Inject
+    StorageManager storageMgr;
+    @Inject
+    PrimaryDataStoreHelper dataStoreHelper;
+    @Inject
+    protected HostDao _hostDao;
+    @Inject
+    protected StoragePoolHostDao _storagePoolHostDao;
+
+    private HypervisorType getHypervisorType(long hostId) {
+        HostVO host = _hostDao.findById(hostId);
+        if (host != null)
+            return host.getHypervisorType();
+        return HypervisorType.None;
+    }
+
+    private List<HostVO> getPoolHostsList(ClusterScope clusterScope, HypervisorType hypervisorType) {
+
+        List<HostVO> hosts = new ArrayList<HostVO>();
+
+        if (hypervisorType != null) {
+            hosts = _resourceMgr
+                    .listAllHostsInOneZoneNotInClusterByHypervisor(hypervisorType, clusterScope.getZoneId(), clusterScope.getScopeId());
+        } else {
+            List<HostVO> xenServerHosts = _resourceMgr
+                    .listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType.XenServer, clusterScope.getZoneId(), clusterScope.getScopeId());
+            List<HostVO> vmWareServerHosts = _resourceMgr
+                    .listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType.VMware, clusterScope.getZoneId(), clusterScope.getScopeId());
+            List<HostVO> kvmHosts = _resourceMgr.
+                    listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType.KVM, clusterScope.getZoneId(), clusterScope.getScopeId());
+
+            hosts.addAll(xenServerHosts);
+            hosts.addAll(vmWareServerHosts);
+            hosts.addAll(kvmHosts);
+        }
+        return hosts;
+    }
+
+    public boolean changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
+        List<HostVO> hosts = getPoolHostsList(clusterScope, hypervisorType);
+        s_logger.debug("Changing scope of the storage pool to Zone");
+        for (HostVO host : hosts) {
+            try {
+                storageMgr.connectHostToSharedPool(host.getId(), store.getId());
+            } catch (Exception e) {
+                s_logger.warn("Unable to establish a connection between " + host + " and " + store, e);
+            }
+        }
+        dataStoreHelper.switchToZone(store, hypervisorType);
+        return true;
+    }
+
+    public boolean changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
+        Pair<List<StoragePoolHostVO>, Integer> hostPoolRecords = _storagePoolHostDao.listByPoolIdNotInCluster(clusterScope.getScopeId(), store.getId());
+        s_logger.debug("Changing scope of the storage pool to Cluster");
+        HypervisorType hType = null;
+        if (hostPoolRecords.second() > 0) {
+            hType = getHypervisorType(hostPoolRecords.first().get(0).getHostId());
+        }
+
+        StoragePool pool = (StoragePool) store;
+        for (StoragePoolHostVO host : hostPoolRecords.first()) {
+            DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(pool);
+            final Answer answer = agentMgr.easySend(host.getHostId(), deleteCmd);
+
+            if (answer != null && answer.getResult()) {
+                if (HypervisorType.KVM != hType) {
+                    break;
+                }
+            } else {
+                if (answer != null) {
+                    s_logger.debug("Failed to delete storage pool: " + answer.getResult());
+                }
+            }
+        }
+        dataStoreHelper.switchToCluster(store, clusterScope);
+        return true;
+    }
Review Comment:
Both of these methods return a boolean; however, they can only return true, so their output does not matter. It is better to change them to void methods.
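A sketch of what that could look like (assuming no caller relies on the boolean; the corresponding declarations in `PrimaryDataStoreLifeCycle` would presumably need the same change):
```java
public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
    // ... existing logic unchanged ...
    dataStoreHelper.switchToZone(store, hypervisorType);
}

public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
    // ... existing logic unchanged ...
    dataStoreHelper.switchToCluster(store, clusterScope);
}
```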
##########
engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AbstractPrimaryDataStoreLifeCycleImpl.java:
##########
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.Pair;
+
+public class AbstractPrimaryDataStoreLifeCycleImpl {
+    private static final Logger s_logger = Logger.getLogger(AbstractPrimaryDataStoreLifeCycleImpl.class);
+    @Inject
+    AgentManager agentMgr;
+    @Inject
+    protected ResourceManager _resourceMgr;
+    @Inject
+    StorageManager storageMgr;
+    @Inject
+    PrimaryDataStoreHelper dataStoreHelper;
+    @Inject
+    protected HostDao _hostDao;
+    @Inject
+    protected StoragePoolHostDao _storagePoolHostDao;
Review Comment:
The same applies here for class variables starting with `_`.
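i.e., something like (illustrative only):
```java
@Inject
protected ResourceManager resourceMgr;
@Inject
protected HostDao hostDao;
@Inject
protected StoragePoolHostDao storagePoolHostDao;
```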
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]