This is an automated email from the ASF dual-hosted git repository.

dahn pushed a commit to branch 4.20
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
commit dd84c74e822cefcbcd9c30ee4320d91ad0a0f4f7 Merge: 011fced91ed 88ce639255d Author: Daan Hoogland <[email protected]> AuthorDate: Tue May 13 11:41:36 2025 +0200 Merge branch '4.19' into 4.20 .../hypervisor/vmware/resource/VmwareResource.java | 386 +++++++++++---------- plugins/storage/volume/linstor/CHANGELOG.md | 5 + .../driver/LinstorPrimaryDataStoreDriverImpl.java | 78 ++++- .../util/LinstorConfigurationManager.java | 9 +- .../storage/datastore/util/LinstorUtil.java | 32 +- pom.xml | 2 +- .../java/com/cloud/alert/AlertManagerImpl.java | 96 ++--- .../java/com/cloud/network/NetworkServiceImpl.java | 6 + 8 files changed, 370 insertions(+), 244 deletions(-) diff --cc plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 711bac10c94,9e105749da9..04b9bdca39b --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@@ -77,8 -75,9 +77,7 @@@ import org.apache.commons.collections.C import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.math.NumberUtils; --import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; -import org.apache.log4j.NDC; +import org.apache.logging.log4j.ThreadContext; import org.joda.time.Duration; import com.cloud.agent.IAgentControl; @@@ -293,6 -292,6 +292,7 @@@ import com.cloud.utils.FileUtil import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; ++import com.cloud.utils.StringUtils; import com.cloud.utils.Ternary; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; @@@ -394,7 -394,7 +394,6 @@@ public class VmwareResource extends Ser // out an operation protected final int _retry = 24; -- protected final int _sleep = 10000; protected final int DefaultDomRSshPort = 3922; protected final int MazCmdMBean = 100; @@@ -420,10 -420,10 +419,10 @@@ protected DiskControllerType _rootDiskController = DiskControllerType.ide; protected ManagedObjectReference _morHyperHost; -- protected final static ThreadLocal<VmwareContext> s_serviceContext = new ThreadLocal<VmwareContext>(); ++ protected final static ThreadLocal<VmwareContext> s_serviceContext = new ThreadLocal<>(); protected String _hostName; -- protected List<PropertyMapDynamicBean> _cmdMBeans = new ArrayList<PropertyMapDynamicBean>(); ++ protected List<PropertyMapDynamicBean> _cmdMBeans = new ArrayList<>(); protected Gson _gson; @@@ -434,7 -434,7 +433,7 @@@ protected VirtualRoutingResource _vrResource; -- protected final static HashMap<VirtualMachinePowerState, PowerState> s_powerStatesTable = new HashMap<VirtualMachinePowerState, PowerState>(); ++ protected final static HashMap<VirtualMachinePowerState, PowerState> s_powerStatesTable = new HashMap<>(); static { s_powerStatesTable.put(VirtualMachinePowerState.POWERED_ON, PowerState.PowerOn); @@@ -468,8 -468,8 +467,8 @@@ @Override public Answer executeRequest(Command cmd) { logCommand(cmd); -- Answer answer = null; - NDC.push(getCommandLogTitle(cmd)); ++ Answer answer; + ThreadContext.push(getCommandLogTitle(cmd)); try { long cmdSequence = _cmdSequence++; Date startTime = DateUtil.currentGMTTime(); @@@ -698,13 -698,13 +697,13 @@@ String scriptChecksum = lines[1].trim(); String checksum = ChecksumUtil.calculateCurrentChecksum(sysVMName, "vms/cloud-scripts.tgz").trim(); -- if 
(!org.apache.commons.lang3.StringUtils.isEmpty(checksum) && checksum.equals(scriptChecksum) && !cmd.isForced()) { ++ if (!StringUtils.isEmpty(checksum) && checksum.equals(scriptChecksum) && !cmd.isForced()) { String msg = String.format("No change in the scripts checksum, not patching systemVM %s", sysVMName); - s_logger.info(msg); + logger.info(msg); return new PatchSystemVmAnswer(cmd, msg, lines[0], lines[1]); } -- Pair<Boolean, String> patchResult = null; ++ Pair<Boolean, String> patchResult; try { patchResult = SshHelper.sshExecute(controlIp, DefaultDomRSshPort, "root", pemFile, null, "/var/cache/cloud/patch-sysvms.sh", 10000, 10000, 600000); @@@ -732,7 -732,7 +731,7 @@@ private Answer execute(SetupPersistentNetworkCommand cmd) { VmwareHypervisorHost host = getHyperHost(getServiceContext()); -- String hostname = null; ++ String hostname; VmwareContext context = getServiceContext(); HostMO hostMO = new HostMO(context, host.getMor()); @@@ -757,7 -757,7 +756,7 @@@ if (storageNfsVersion != null) return; if (cmd instanceof CopyCommand) { -- EnumMap<VmwareStorageProcessorConfigurableFields, Object> params = new EnumMap<VmwareStorageProcessorConfigurableFields, Object>( ++ EnumMap<VmwareStorageProcessorConfigurableFields, Object> params = new EnumMap<>( VmwareStorageProcessorConfigurableFields.class); examineStorageSubSystemCommandNfsVersion((CopyCommand) cmd, params); params = examineStorageSubSystemCommandFullCloneFlagForVmware((CopyCommand) cmd, params); @@@ -796,10 -796,10 +795,10 @@@ if (destDataStore instanceof PrimaryDataStoreTO) { PrimaryDataStoreTO dest = (PrimaryDataStoreTO) destDataStore; if (dest.isFullCloneFlag() != null) { -- paramsCopy.put(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG, dest.isFullCloneFlag().booleanValue()); ++ paramsCopy.put(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG, dest.isFullCloneFlag()); } if (dest.getDiskProvisioningStrictnessFlag() != null) { -- paramsCopy.put(VmwareStorageProcessorConfigurableFields.DISK_PROVISIONING_STRICTNESS, dest.getDiskProvisioningStrictnessFlag().booleanValue()); ++ paramsCopy.put(VmwareStorageProcessorConfigurableFields.DISK_PROVISIONING_STRICTNESS, dest.getDiskProvisioningStrictnessFlag()); } } } @@@ -885,8 -885,8 +884,8 @@@ try { if (newSize < oldSize) { String errorMsg = String.format("VMware doesn't support shrinking volume from larger size [%s] GB to a smaller size [%s] GB. Can't resize volume of VM [name: %s].", -- oldSize / Float.valueOf(ResourceType.bytesToMiB), newSize / Float.valueOf(ResourceType.bytesToMiB), vmName); - s_logger.error(errorMsg); ++ oldSize / (float) ResourceType.bytesToMiB, newSize / (float) ResourceType.bytesToMiB, vmName); + logger.error(errorMsg); throw new Exception(errorMsg); } else if (newSize == oldSize) { return new ResizeVolumeAnswer(cmd, true, "success", newSize * ResourceType.bytesToKiB); @@@ -1021,13 -1021,13 +1020,15 @@@ // OfflineVmwareMigration: 6. 
check if a worker was used and destroy it if needed try { if (useWorkerVm) { - s_logger.info("Destroy worker VM after volume resize"); + logger.info("Destroy worker VM after volume resize"); -- vmMo.detachDisk(vmdkDataStorePath, false); -- vmMo.destroy(); ++ if (vmMo != null) { ++ vmMo.detachDisk(vmdkDataStorePath, false); ++ vmMo.destroy(); ++ } } } catch (Throwable e) { - s_logger.error(String.format("Failed to destroy worker VM [name: %s] due to: [%s].", vmName, e.getMessage()), e); + logger.error(String.format("Failed to destroy worker VM [name: %s] due to: [%s].", vmName, e.getMessage()), e); } } } @@@ -1116,13 -1116,13 +1117,11 @@@ if (cmd.getOption() != null && cmd.getOption().equals("create")) { String result = networkUsage(cmd.getPrivateIP(), "create", null); -- NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, result, 0L, 0L); -- return answer; ++ return new NetworkUsageAnswer(cmd, result, 0L, 0L); } long[] stats = getNetworkStats(cmd.getPrivateIP(), null); -- NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, "", stats[0], stats[1]); -- return answer; ++ return new NetworkUsageAnswer(cmd, "", stats[0], stats[1]); } protected NetworkUsageAnswer VPCNetworkUsage(NetworkUsageCommand cmd) { @@@ -1197,8 -1197,8 +1196,8 @@@ } protected Answer execute(GetAutoScaleMetricsCommand cmd) { -- Long bytesSent; -- Long bytesReceived; ++ long bytesSent; ++ long bytesReceived; if (cmd.isForVpc()) { long[] stats = getVPCNetworkStats(cmd.getPrivateIP(), cmd.getPublicIP(), "get", ""); bytesSent = stats[0]; @@@ -1218,14 -1218,14 +1217,14 @@@ switch (metrics.getCounter()) { case NETWORK_RECEIVED_AVERAGE_MBPS: values.add(new VirtualRouterAutoScale.AutoScaleMetricsValue(metrics, VirtualRouterAutoScale.AutoScaleValueType.AGGREGATED_VM_GROUP, -- Double.valueOf(bytesReceived) / VirtualRouterAutoScale.MBITS_TO_BYTES)); ++ (double) bytesReceived / VirtualRouterAutoScale.MBITS_TO_BYTES)); break; case NETWORK_TRANSMIT_AVERAGE_MBPS: values.add(new VirtualRouterAutoScale.AutoScaleMetricsValue(metrics, VirtualRouterAutoScale.AutoScaleValueType.AGGREGATED_VM_GROUP, -- Double.valueOf(bytesSent) / VirtualRouterAutoScale.MBITS_TO_BYTES)); ++ (double) bytesSent / VirtualRouterAutoScale.MBITS_TO_BYTES)); break; case LB_AVERAGE_CONNECTIONS: -- values.add(new VirtualRouterAutoScale.AutoScaleMetricsValue(metrics, VirtualRouterAutoScale.AutoScaleValueType.INSTANT_VM, Double.valueOf(lbConnections))); ++ values.add(new VirtualRouterAutoScale.AutoScaleMetricsValue(metrics, VirtualRouterAutoScale.AutoScaleValueType.INSTANT_VM, (double) lbConnections)); break; } } @@@ -1237,9 -1237,9 +1236,9 @@@ public ExecutionResult createFileInVR(String routerIp, String filePath, String fileName, String content) { File keyFile = getSystemVmKeyFile(); try { -- SshHelper.scpTo(routerIp, 3922, "root", keyFile, null, filePath, content.getBytes("UTF-8"), fileName, null); ++ SshHelper.scpTo(routerIp, 3922, "root", keyFile, null, filePath, content.getBytes(StringUtils.getPreferredCharset()), fileName, null); } catch (Exception e) { - s_logger.warn("Fail to create file " + filePath + fileName + " in VR " + routerIp, e); + logger.warn("Fail to create file " + filePath + fileName + " in VR " + routerIp, e); return new ExecutionResult(false, e.getMessage()); } return new ExecutionResult(true, null); @@@ -1284,8 -1284,8 +1283,8 @@@ // private int findRouterEthDeviceIndex(String domrName, String routerIp, String mac) throws Exception { File keyFile = getSystemVmKeyFile(); - s_logger.info("findRouterEthDeviceIndex. 
mac: " + mac); - ArrayList<String> skipInterfaces = new ArrayList<String>(Arrays.asList("all", "default", "lo")); + logger.info("findRouterEthDeviceIndex. mac: " + mac); - ArrayList<String> skipInterfaces = new ArrayList<String>(Arrays.asList("all", "default", "lo")); ++ ArrayList<String> skipInterfaces = new ArrayList<>(Arrays.asList("all", "default", "lo")); // when we dynamically plug in a new NIC into virtual router, it may take time to show up in guest OS // we use a waiting loop here as a workaround to synchronize activities in systems @@@ -1352,8 -1352,8 +1351,8 @@@ int ethDeviceNum = findRouterEthDeviceIndex(domrName, routerIp, nic.getMac()); nic.setDeviceId(ethDeviceNum); } catch (Exception e) { -- String msg = "Prepare SetupGuestNetwork failed due to " + e.toString(); - s_logger.warn(msg, e); ++ String msg = "Prepare SetupGuestNetwork failed due to " + e; + logger.warn(msg, e); return new ExecutionResult(false, msg); } return new ExecutionResult(true, null); @@@ -1396,8 -1396,8 +1395,8 @@@ int ethDeviceNum = findRouterEthDeviceIndex(routerName, routerIp, pubIp.getVifMacAddress()); pubIp.setNicDevId(ethDeviceNum); } catch (Exception e) { -- String msg = "Prepare Ip SNAT failure due to " + e.toString(); - s_logger.error(msg, e); ++ String msg = "Prepare Ip SNAT failure due to " + e; + logger.error(msg, e); return new ExecutionResult(false, e.toString()); } return new ExecutionResult(true, null); @@@ -1412,8 -1412,8 +1411,8 @@@ int ethDeviceNum = findRouterEthDeviceIndex(routerName, routerIp, nic.getMac()); nic.setDeviceId(ethDeviceNum); } catch (Exception e) { -- String msg = "Prepare SetNetworkACL failed due to " + e.toString(); - s_logger.error(msg, e); ++ String msg = "Prepare SetNetworkACL failed due to " + e; + logger.error(msg, e); return new ExecutionResult(false, msg); } return new ExecutionResult(true, null); @@@ -1432,8 -1432,8 +1431,8 @@@ plugNicCommandInternal(cmd.getVmName(), nicDeviceType, cmd.getNic(), cmd.getVMType()); return new PlugNicAnswer(cmd, true, "success"); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); - return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + e.toString()); + logger.error("Unexpected exception: ", e); - return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + e.toString()); ++ return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + e); } } @@@ -1479,8 -1479,8 +1478,8 @@@ deviceNumber++; VirtualDevice nic; - Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false, vmType); - String dvSwitchUuid = null; + Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false, nicTo.getNetworkSegmentName(), vmType); - String dvSwitchUuid = null; ++ String dvSwitchUuid; if (VmwareHelper.isDvPortGroup(networkInfo.first())) { ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); @@@ -1541,8 -1541,8 +1540,8 @@@ return new ReplugNicAnswer(cmd, false, "Nic to replug not found"); } - Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false, cmd.getVMType()); - String dvSwitchUuid = null; + Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false, null, cmd.getVMType()); - String dvSwitchUuid = null; ++ String dvSwitchUuid; if 
(VmwareHelper.isDvPortGroup(networkInfo.first())) { ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); @@@ -1560,8 -1560,8 +1559,8 @@@ return new ReplugNicAnswer(cmd, true, "success"); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); - return new ReplugNicAnswer(cmd, false, "Unable to execute ReplugNicCommand due to " + e.toString()); + logger.error("Unexpected exception: ", e); - return new ReplugNicAnswer(cmd, false, "Unable to execute ReplugNicCommand due to " + e.toString()); ++ return new ReplugNicAnswer(cmd, false, "Unable to execute ReplugNicCommand due to " + e); } } @@@ -1601,14 -1601,14 +1600,14 @@@ return new UnPlugNicAnswer(cmd, true, "success"); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); - return new UnPlugNicAnswer(cmd, false, "Unable to execute unPlugNicCommand due to " + e.toString()); + logger.error("Unexpected exception: ", e); - return new UnPlugNicAnswer(cmd, false, "Unable to execute unPlugNicCommand due to " + e.toString()); ++ return new UnPlugNicAnswer(cmd, false, "Unable to execute unPlugNicCommand due to " + e); } } private void plugPublicNic(VirtualMachineMO vmMo, final String vlanId, final IpAddressTO ipAddressTO) throws Exception { // TODO : probably need to set traffic shaping -- Pair<ManagedObjectReference, String> networkInfo = null; ++ Pair<ManagedObjectReference, String> networkInfo; VirtualSwitchType vSwitchType = VirtualSwitchType.StandardVirtualSwitch; if (_publicTrafficInfo != null) { vSwitchType = _publicTrafficInfo.getVirtualSwitchType(); @@@ -1749,7 -1749,7 +1748,7 @@@ ip.setNewNic(addVif); } } catch (Throwable e) { - logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); - s_logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); ++ logger.error("Unexpected exception: " + e + " will shortcut rest of IPAssoc commands", e); return new ExecutionResult(false, e.toString()); } return new ExecutionResult(true, null); @@@ -1793,7 -1793,7 +1792,7 @@@ configureNicDevice(vmMo, nicInfo.first(), VirtualDeviceConfigSpecOperation.REMOVE, "unplugNicCommand"); } } catch (Throwable e) { - logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); - s_logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); ++ logger.error("Unexpected exception: " + e + " will shortcut rest of IPAssoc commands", e); return new ExecutionResult(false, e.toString()); } return new ExecutionResult(true, null); @@@ -1849,11 -1849,11 +1848,11 @@@ VRScripts.CONNECTION_TIMEOUT, VRScripts.CONNECTION_TIMEOUT, timeout); } catch (Exception e) { String msg = "Command failed due to " + VmwareHelper.getExceptionMessage(e); - s_logger.error(msg); - result = new Pair<Boolean, String>(false, msg); + logger.error(msg); - result = new Pair<Boolean, String>(false, msg); ++ result = new Pair<>(false, msg); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(script + " execution result: " + result.first().toString()); + if (logger.isDebugEnabled()) { + logger.debug(script + " execution result: " + result.first().toString()); } return new ExecutionResult(result.first(), result.second()); } @@@ -1894,7 -1894,7 +1893,7 @@@ } private DiskTO[] validateDisks(DiskTO[] disks) { -- List<DiskTO> validatedDisks = new ArrayList<DiskTO>(); ++ List<DiskTO> validatedDisks = new ArrayList<>(); for (DiskTO vol : 
disks) { if (vol.getType() != Volume.Type.ISO) { @@@ -1967,8 -1967,8 +1966,8 @@@ throw new Exception("Unable to execute ScaleVmCommand"); } } catch (Exception e) { - s_logger.error(String.format("ScaleVmCommand failed due to: [%s].", VmwareHelper.getExceptionMessage(e)), e); - return new ScaleVmAnswer(cmd, false, String.format("Unable to execute ScaleVmCommand due to: [%s].", e.toString())); + logger.error(String.format("ScaleVmCommand failed due to: [%s].", VmwareHelper.getExceptionMessage(e)), e); - return new ScaleVmAnswer(cmd, false, String.format("Unable to execute ScaleVmCommand due to: [%s].", e.toString())); ++ return new ScaleVmAnswer(cmd, false, String.format("Unable to execute ScaleVmCommand due to: [%s].", e)); } return new ScaleVmAnswer(cmd, true, null); } @@@ -2037,8 -2037,8 +2036,8 @@@ String existingVmName = null; VirtualMachineFileInfo existingVmFileInfo = null; VirtualMachineFileLayoutEx existingVmFileLayout = null; -- List<DatastoreMO> existingDatastores = new ArrayList<DatastoreMO>(); -- String diskStoragePolicyId = null; ++ List<DatastoreMO> existingDatastores = new ArrayList<>(); ++ String diskStoragePolicyId; String vmStoragePolicyId = null; VirtualMachineDefinedProfileSpec diskProfileSpec = null; VirtualMachineDefinedProfileSpec vmProfileSpec = null; @@@ -2187,7 -2187,7 +2186,7 @@@ } } tearDownVm(vmMo); -- } else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), getReservedCpuMHZ(vmSpec), ++ } else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed(), getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(), (int) (vmSpec.getMaxRam() / ResourceType.bytesToMiB), getReservedMemoryMb(vmSpec), guestOsId, rootDiskDataStoreDetails.first(), false, controllerInfo, systemVm)) { throw new Exception("Failed to create VM. vmName: " + vmInternalCSName); @@@ -2385,54 -2388,9 +2387,9 @@@ if (!hasSnapshot) { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); - DataStoreTO primaryStore = volumeTO.getDataStore(); - Map<String, String> details = vol.getDetails(); - boolean managed = false; - String iScsiName = null; - - if (details != null) { - managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); - iScsiName = details.get(DiskTO.IQN); - } - - String primaryStoreUuid = primaryStore.getUuid(); - // if the storage is managed, iScsiName should not be null - String datastoreName = managed ? 
VmwareResource.getDatastoreName(iScsiName) : primaryStoreUuid; - Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails = dataStoresDetails.get(datastoreName); - - assert (volumeDsDetails != null); - if (volumeDsDetails == null) { - throw new Exception("Primary datastore " + primaryStore.getUuid() + " is not mounted on host."); - } - - if (vol.getDetails().get(DiskTO.PROTOCOL_TYPE) != null && vol.getDetails().get(DiskTO.PROTOCOL_TYPE).equalsIgnoreCase("DatastoreCluster")) { - if (diskInfoBuilder != null && matchingExistingDisk != null) { - String[] diskChain = matchingExistingDisk.getDiskChain(); - if (diskChain != null && diskChain.length > 0) { - DatastoreFile file = new DatastoreFile(diskChain[0]); - if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) { - if (logger.isInfoEnabled()) - logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); - volumeTO.setPath(file.getFileBaseName()); - } - } - DatastoreMO diskDatastoreMofromVM = getDataStoreWhereDiskExists(hyperHost, context, diskInfoBuilder, vol, diskDatastores); - if (diskDatastoreMofromVM != null) { - String actualPoolUuid = diskDatastoreMofromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID); - if (actualPoolUuid != null && !actualPoolUuid.equalsIgnoreCase(primaryStore.getUuid())) { - volumeDsDetails = new Pair<>(diskDatastoreMofromVM.getMor(), diskDatastoreMofromVM); - if (logger.isInfoEnabled()) - logger.info("Detected datastore uuid change on volume: " + volumeTO.getId() + " " + primaryStore.getUuid() + " -> " + actualPoolUuid); - ((PrimaryDataStoreTO)primaryStore).setUuid(actualPoolUuid); - } - } - } - } - String[] diskChain = syncDiskChain(dcMo, vmMo, vol, matchingExistingDisk, volumeDsDetails.second()); -- int deviceNumber = -1; ++ int deviceNumber; if (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) { deviceNumber = ideUnitNumber % VmwareHelper.MAX_ALLOWED_DEVICES_IDE_CONTROLLER; ideUnitNumber++; @@@ -2441,9 -2399,10 +2398,10 @@@ scsiUnitNumber++; } + VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); Long maxIops = volumeTO.getIopsWriteRate() + volumeTO.getIopsReadRate(); VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(), deviceNumber, i + 1, maxIops); - s_logger.debug(LogUtils.logGsonWithoutException("The following definitions will be used to start the VM: virtual device [%s], volume [%s].", device, volumeTO)); + logger.debug(LogUtils.logGsonWithoutException("The following definitions will be used to start the VM: virtual device [%s], volume [%s].", device, volumeTO)); diskStoragePolicyId = volumeTO.getvSphereStoragePolicyId(); if (StringUtils.isNotEmpty(diskStoragePolicyId)) { @@@ -2508,9 -2467,9 +2466,9 @@@ NiciraNvpApiVersion.logNiciraApiVersion(); -- Map<String, String> nicUuidToDvSwitchUuid = new HashMap<String, String>(); ++ Map<String, String> nicUuidToDvSwitchUuid = new HashMap<>(); for (NicTO nicTo : sortNicsByDeviceId(nics)) { - s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); + logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); String adapterTypeStr = deployAsIs ? 
mapAdapterType(deployAsIsInfo.getNicAdapterMap().get(nicTo.getDeviceId())) : @@@ -2572,7 -2530,7 +2530,7 @@@ // // pass boot arguments through machine.id & perform customized options to VMX -- ArrayList<OptionValue> extraOptions = new ArrayList<OptionValue>(); ++ ArrayList<OptionValue> extraOptions = new ArrayList<>(); configBasicExtraOption(extraOptions, vmSpec); if (deployAsIs) { @@@ -2678,12 -2632,12 +2636,12 @@@ // Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it. if (existingVmName != null && existingVmFileLayout != null) { -- List<String> vmDatastoreNames = new ArrayList<String>(); ++ List<String> vmDatastoreNames = new ArrayList<>(); for (DatastoreMO vmDatastore : vmMo.getAllDatastores()) { vmDatastoreNames.add(vmDatastore.getName()); } // Don't delete files that are in a datastore that is being used by the new VM as well (zone-wide datastore). -- List<String> skipDatastores = new ArrayList<String>(); ++ List<String> skipDatastores = new ArrayList<>(); for (DatastoreMO existingDatastore : existingDatastores) { if (vmDatastoreNames.contains(existingDatastore.getName())) { skipDatastores.add(existingDatastore.getName()); @@@ -2714,6 -2668,64 +2672,64 @@@ } } + private Pair<ManagedObjectReference, DatastoreMO> getVolumeDatastoreDetails(DiskTO vol, HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> dataStoresDetails) throws Exception { + boolean managed = false; + String iScsiName = null; + Map<String, String> details = vol.getDetails(); + if (MapUtils.isNotEmpty(details)) { + managed = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + iScsiName = details.get(DiskTO.IQN); + } + + VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); + DataStoreTO primaryStore = volumeTO.getDataStore(); + String primaryStoreUuid = primaryStore.getUuid(); + // if the storage is managed, iScsiName should not be null + String datastoreName = managed ? 
VmwareResource.getDatastoreName(iScsiName) : primaryStoreUuid; + Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails = dataStoresDetails.get(datastoreName); + if (volumeDsDetails == null) { + throw new Exception("Primary datastore " + primaryStore.getUuid() + " is not mounted on host."); + } + + return volumeDsDetails; + } + + private void syncVolumeDatastoreAndPathForDatastoreCluster(DiskTO vol, VirtualMachineDiskInfoBuilder diskInfoBuilder, VirtualMachineDiskInfo matchingExistingDisk, + Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails, List<Pair<Integer, ManagedObjectReference>> diskDatastores, + VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { + if (vol.getDetails() == null || vol.getDetails().get(DiskTO.PROTOCOL_TYPE) == null || !vol.getDetails().get(DiskTO.PROTOCOL_TYPE).equalsIgnoreCase("DatastoreCluster")) { + return; + } + + if (diskInfoBuilder != null && matchingExistingDisk != null) { + String[] diskChain = matchingExistingDisk.getDiskChain(); + if (diskChain != null && diskChain.length > 0) { + DatastoreFile file = new DatastoreFile(diskChain[0]); + VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); + if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); ++ if (logger.isInfoEnabled()) { ++ logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); + } + volumeTO.setPath(file.getFileBaseName()); + vol.setPath(file.getFileBaseName()); + } + } + DatastoreMO diskDatastoreMofromVM = getDataStoreWhereDiskExists(hyperHost, context, diskInfoBuilder, vol, diskDatastores); + if (diskDatastoreMofromVM != null) { + String actualPoolUuid = diskDatastoreMofromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID); + VolumeObjectTO volumeTO = (VolumeObjectTO) vol.getData(); + DataStoreTO primaryStore = volumeTO.getDataStore(); + if (actualPoolUuid != null && !actualPoolUuid.equalsIgnoreCase(primaryStore.getUuid())) { + volumeDsDetails = new Pair<>(diskDatastoreMofromVM.getMor(), diskDatastoreMofromVM); - if (s_logger.isInfoEnabled()) { - s_logger.info("Detected datastore uuid change on volume: " + volumeTO.getId() + " " + primaryStore.getUuid() + " -> " + actualPoolUuid); ++ if (logger.isInfoEnabled()) { ++ logger.info("Detected datastore uuid change on volume: " + volumeTO.getId() + " " + primaryStore.getUuid() + " -> " + actualPoolUuid); + } + ((PrimaryDataStoreTO)primaryStore).setUuid(actualPoolUuid); + } + } + } + } + private boolean powerOnVM(final VirtualMachineMO vmMo, final String vmInternalCSName, final String vmNameOnVcenter) throws Exception { int retry = 20; while (retry-- > 0) { @@@ -3069,7 -3081,7 +3085,7 @@@ final Pair<VirtualDisk, String> vdisk = getVirtualDiskInfo(vmMo, appendFileType(rootDiskTO.getPath(), VMDK_EXTENSION)); assert (vdisk != null); -- Long reqSize = 0L; ++ long reqSize = 0L; final VolumeObjectTO volumeTO = ((VolumeObjectTO) rootDiskTO.getData()); if (volumeTO != null) { reqSize = volumeTO.getSize() / 1024; @@@ -3185,8 -3197,8 +3201,8 @@@ * @param vmConfigSpec virtual machine config spec */ protected void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) { -- if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize) { - logger.info("Video 
card memory was set " + toHumanReadableSize(videoCard.getVideoRamSizeInKB().longValue()) + " instead of " + toHumanReadableSize(svgaVmramSize)); - s_logger.info("Video card memory was set " + toHumanReadableSize(videoCard.getVideoRamSizeInKB().longValue()) + " instead of " + toHumanReadableSize(svgaVmramSize)); ++ if (videoCard.getVideoRamSizeInKB() != svgaVmramSize) { ++ logger.info("Video card memory was set " + toHumanReadableSize(videoCard.getVideoRamSizeInKB()) + " instead of " + toHumanReadableSize(svgaVmramSize)); configureSpecVideoCardNewVRamSize(videoCard, svgaVmramSize, vmConfigSpec); } } @@@ -3265,7 -3226,7 +3281,7 @@@ if (vmMo == null) return; -- boolean hasSnapshot = false; ++ boolean hasSnapshot; hasSnapshot = vmMo.hasSnapshot(); if (!hasSnapshot) vmMo.tearDownDevices(new Class<?>[]{VirtualDisk.class, VirtualEthernetCard.class}); @@@ -3368,7 -3329,7 +3384,7 @@@ } else if (_instanceNameFlag && vmSpec.getHostName() != null) { vmNameOnVcenter = vmSpec.getHostName(); } -- return new Pair<String, String>(vmInternalCSName, vmNameOnVcenter); ++ return new Pair<>(vmInternalCSName, vmNameOnVcenter); } protected void configNestedHVSupport(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, VirtualMachineConfigSpec vmConfigSpec) throws Exception { @@@ -3385,12 -3346,12 +3401,12 @@@ Boolean nestedHvSupported = hostCapability.isNestedHVSupported(); if (nestedHvSupported == null) { // nestedHvEnabled property is supported only since VMware 5.1. It's not defined for earlier versions. - s_logger.warn("Hypervisor doesn't support nested virtualization, unable to set config for VM " + vmSpec.getName()); - } else if (nestedHvSupported.booleanValue()) { - s_logger.debug("Hypervisor supports nested virtualization, enabling for VM " + vmSpec.getName()); + logger.warn("Hypervisor doesn't support nested virtualization, unable to set config for VM " + vmSpec.getName()); - } else if (nestedHvSupported.booleanValue()) { ++ } else if (nestedHvSupported) { + logger.debug("Hypervisor supports nested virtualization, enabling for VM " + vmSpec.getName()); vmConfigSpec.setNestedHVEnabled(true); } else { - s_logger.warn("Hypervisor doesn't support nested virtualization, unable to set config for VM " + vmSpec.getName()); + logger.warn("Hypervisor doesn't support nested virtualization, unable to set config for VM " + vmSpec.getName()); vmConfigSpec.setNestedHVEnabled(false); } } @@@ -3457,7 -3418,7 +3473,7 @@@ for (NicTO nicTo : sortNicsByDeviceId(vmSpec.getNics())) { if (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch) { // We need to create a port with a unique vlan and pass the key to the nic device - logger.trace("Nic " + nicTo.toString() + " is connected to an NVP logicalswitch"); - s_logger.trace("Nic " + nicTo.toString() + " is connected to an NVP logicalswitch"); ++ logger.trace("Nic " + nicTo + " is connected to an NVP logicalswitch"); VirtualDevice nicVirtualDevice = vmMo.getNicDeviceByIndex(nicIndex); if (nicVirtualDevice == null) { throw new Exception("Failed to find a VirtualDevice for nic " + nicIndex); //FIXME Generic exceptions are bad @@@ -3471,7 -3432,7 +3487,7 @@@ String portGroupKey = port.getPortgroupKey(); String dvSwitchUuid = port.getSwitchUuid(); - logger.debug("NIC " + nicTo.toString() + " is connected to dvSwitch " + dvSwitchUuid + " pg " + portGroupKey + " port " + portKey); - s_logger.debug("NIC " + nicTo.toString() + " is connected to dvSwitch " + dvSwitchUuid + " pg " + portGroupKey + " port " + portKey); ++ logger.debug("NIC " + nicTo + " is connected to dvSwitch " + 
dvSwitchUuid + " pg " + portGroupKey + " port " + portKey); ManagedObjectReference dvSwitchManager = vmMo.getContext().getVimClient().getServiceContent().getDvSwitchManager(); ManagedObjectReference dvSwitch = vmMo.getContext().getVimClient().getService().queryDvsByUuid(dvSwitchManager, dvSwitchUuid); @@@ -3483,7 -3444,7 +3499,7 @@@ List<DistributedVirtualPort> dvPorts = vmMo.getContext().getVimClient().getService().fetchDVPorts(dvSwitch, criteria); DistributedVirtualPort vmDvPort = null; -- List<Integer> usedVlans = new ArrayList<Integer>(); ++ List<Integer> usedVlans = new ArrayList<>(); for (DistributedVirtualPort dvPort : dvPorts) { // Find the port for this NIC by portkey if (portKey.equals(dvPort.getKey())) { @@@ -3498,7 -3459,7 +3514,7 @@@ } if (vmDvPort == null) { -- throw new Exception("Empty port list from dvSwitch for nic " + nicTo.toString()); ++ throw new Exception("Empty port list from dvSwitch for nic " + nicTo); } DVPortConfigInfo dvPortConfigInfo = vmDvPort.getConfig(); @@@ -3528,15 -3489,15 +3544,15 @@@ dvPortConfigSpec.setSetting(edittedSettings); dvPortConfigSpec.setOperation("edit"); dvPortConfigSpec.setKey(portKey); -- List<DVPortConfigSpec> dvPortConfigSpecs = new ArrayList<DVPortConfigSpec>(); ++ List<DVPortConfigSpec> dvPortConfigSpecs = new ArrayList<>(); dvPortConfigSpecs.add(dvPortConfigSpec); ManagedObjectReference task = vmMo.getContext().getVimClient().getService().reconfigureDVPortTask(dvSwitch, dvPortConfigSpecs); if (!vmMo.getContext().getVimClient().waitForTask(task)) { -- throw new Exception("Failed to configure the dvSwitch port for nic " + nicTo.toString()); ++ throw new Exception("Failed to configure the dvSwitch port for nic " + nicTo); } - logger.debug("NIC " + nicTo.toString() + " connected to vlan " + i); - s_logger.debug("NIC " + nicTo.toString() + " connected to vlan " + i); ++ logger.debug("NIC " + nicTo + " connected to vlan " + i); } else { - s_logger.trace("Port already configured and set to vlan " + vlanId.getVlanId()); + logger.trace("Port already configured and set to vlan " + vlanId.getVlanId()); } } else if (backing instanceof VirtualEthernetCardNetworkBackingInfo) { // This NIC is connected to a Virtual Switch @@@ -3595,8 -3556,8 +3611,8 @@@ private Pair<String, String> getVMDiskInfo(String volumePath, boolean isManaged, String iScsiName, String datastoreUUID, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { -- String dsName = null; -- String diskBackingFileBaseName = null; ++ String dsName; ++ String diskBackingFileBaseName; if (isManaged) { // if the storage is managed, iScsiName should not be null @@@ -3771,11 -3732,11 +3787,11 @@@ private static NicTO[] sortNicsByDeviceId(NicTO[] nics) { -- List<NicTO> listForSort = new ArrayList<NicTO>(); ++ List<NicTO> listForSort = new ArrayList<>(); for (NicTO nic : nics) { listForSort.add(nic); } -- Collections.sort(listForSort, new Comparator<NicTO>() { ++ Collections.sort(listForSort, new Comparator<>() { @Override public int compare(NicTO arg0, NicTO arg1) { @@@ -3794,11 -3755,11 +3810,11 @@@ private static DiskTO[] sortVolumesByDeviceId(DiskTO[] volumes) { -- List<DiskTO> listForSort = new ArrayList<DiskTO>(); ++ List<DiskTO> listForSort = new ArrayList<>(); for (DiskTO vol : volumes) { listForSort.add(vol); } -- Collections.sort(listForSort, new Comparator<DiskTO>() { ++ Collections.sort(listForSort, new Comparator<>() { @Override public int compare(DiskTO arg0, DiskTO arg1) { @@@ -3834,9 -3795,9 +3850,7 @@@ path = path.substring(startIndex + search.length()); -- 
final String search2 = VMDK_EXTENSION; -- -- int endIndex = path.indexOf(search2); ++ int endIndex = path.indexOf(VMDK_EXTENSION); if (endIndex == -1) { return null; @@@ -3953,7 -3914,7 +3967,7 @@@ } private Pair<ManagedObjectReference, DatastoreMO> getDatastoreThatDiskIsOn(HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> dataStoresDetails, DiskTO vol) { -- Pair<ManagedObjectReference, DatastoreMO> rootDiskDataStoreDetails = null; ++ Pair<ManagedObjectReference, DatastoreMO> rootDiskDataStoreDetails; Map<String, String> details = vol.getDetails(); boolean managed = false; @@@ -4022,9 -3983,9 +4036,9 @@@ String vlanToken = switchDetails.third(); String namePrefix = getNetworkNamePrefix(nicTo); -- Pair<ManagedObjectReference, String> networkInfo = null; ++ Pair<ManagedObjectReference, String> networkInfo; - s_logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix); + logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix); if (VirtualSwitchType.StandardVirtualSwitch == switchType) { networkInfo = HypervisorHostHelper.prepareNetwork(switchName, namePrefix, hostMo, @@@ -4032,7 -3993,7 +4046,7 @@@ _opsTimeout, true, nicTo.getBroadcastType(), nicTo.getUuid(), nicTo.getDetails()); } else { String vlanId = getVlanInfo(nicTo, vlanToken); -- String svlanId = null; ++ String svlanId; boolean pvlannetwork = (getPvlanInfo(nicTo) == null) ? false : true; if (vmType != null && vmType.equals(VirtualMachine.Type.DomainRouter) && pvlannetwork) { // plumb this network to the promiscuous vlan. @@@ -4055,7 -4016,7 +4069,7 @@@ TrafficType trafficType = nicTo.getType(); if (!Arrays.asList(supportedTrafficTypes).contains(trafficType)) { -- throw new CloudException("Traffic type " + trafficType.toString() + " for nic " + nicTo.toString() + " is not supported."); ++ throw new CloudException("Traffic type " + trafficType.toString() + " for nic " + nicTo + " is not supported."); } String switchName = null; @@@ -4090,12 -4051,12 +4104,12 @@@ if (switchType == VirtualSwitchType.NexusDistributedVirtualSwitch) { if (trafficType == TrafficType.Management || trafficType == TrafficType.Storage) { throw new CloudException( -- "Unable to configure NIC " + nicTo.toString() + " as traffic type " + trafficType.toString() + " is not supported over virtual switch type " + switchType ++ "Unable to configure NIC " + nicTo + " as traffic type " + trafficType + " is not supported over virtual switch type " + switchType + ". Please specify only supported type of virtual switches i.e. 
{vmwaresvs, vmwaredvs} in physical network traffic label."); } } -- return new Ternary<String, String, String>(switchName, switchType.toString(), vlanId); ++ return new Ternary<>(switchName, switchType.toString(), vlanId); } private String getNetworkNamePrefix(NicTO nicTo) throws Exception { @@@ -4171,7 -4132,7 +4185,7 @@@ ManagedObjectReference morDs = prepareSecondaryDatastoreOnHost(storeUrl); DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs); -- return new Pair<String, ManagedObjectReference>(String.format("[%s] %s%s", dsMo.getName(), isoPath, isoFileName), morDs); ++ return new Pair<>(String.format("[%s] %s%s", dsMo.getName(), isoPath, isoFileName), morDs); } protected Answer execute(ReadyCommand cmd) { @@@ -4179,7 -4140,7 +4193,7 @@@ VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); -- Map<String, String> hostDetails = new HashMap<String, String>(); ++ Map<String, String> hostDetails = new HashMap<>(); ManagedObjectReference morHost = hyperHost.getMor(); HostMO hostMo = new HostMO(context, morHost); boolean uefiLegacySupported = hostMo.isUefiLegacySupported(); @@@ -4230,7 -4191,7 +4244,7 @@@ // getVmNames should return all i-x-y values. List<String> requestedVmNames = cmd.getVmNames(); -- List<String> vmNames = new ArrayList<String>(); ++ List<String> vmNames = new ArrayList<>(); if (requestedVmNames != null) { for (String vmName : requestedVmNames) { @@@ -4299,7 -4260,7 +4313,7 @@@ long writeReq = 0; long writeBytes = 0; -- final ArrayList<PerfMetricId> perfMetricsIds = new ArrayList<PerfMetricId>(); ++ final ArrayList<PerfMetricId> perfMetricsIds = new ArrayList<>(); if (diskReadIOPerfCounterInfo != null) { perfMetricsIds.add(VmwareHelper.createPerfMetricId(diskReadIOPerfCounterInfo, diskBusName)); } @@@ -4389,7 -4350,7 +4403,7 @@@ VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); DatacenterMO dcMo = new DatacenterMO(getServiceContext(), dcMor); -- HashMap<String, VolumeStatsEntry> statEntry = new HashMap<String, VolumeStatsEntry>(); ++ HashMap<String, VolumeStatsEntry> statEntry = new HashMap<>(); for (String chainInfo : cmd.getVolumeUuids()) { if (chainInfo != null) { @@@ -4464,7 -4425,7 +4478,7 @@@ if (getVmPowerState(vmMo) != PowerState.PowerOff) { String msg = "Stop VM " + cmd.getVmName() + " Succeed"; -- boolean success = false; ++ boolean success; if (cmd.isForceStop()) { success = vmMo.powerOff(); } else { @@@ -4750,7 -4710,7 +4764,7 @@@ } Answer createAnswerForCmd(VirtualMachineMO vmMo, List<VolumeObjectTO> volumeObjectToList, Command cmd, Map<Integer, Long> volumeDeviceKey) throws Exception { -- List<VolumeObjectTO> volumeToList = new ArrayList<>(); ++ List<VolumeObjectTO> volumeToList; VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); VirtualDisk[] disks = vmMo.getAllDiskDevice(); Answer answer; @@@ -4812,7 -4772,7 +4826,7 @@@ } private VirtualMachineMO getVirtualMachineMO(String vmName, VmwareHypervisorHost hyperHost) { -- VirtualMachineMO vmMo = null; ++ VirtualMachineMO vmMo; try { // find VM through datacenter (VM is not at the target host yet) vmMo = hyperHost.findVmOnPeerHyperHost(vmName); @@@ -4881,16 -4841,16 +4895,16 @@@ } private Answer migrateVolume(MigrateVolumeCommand cmd) { -- Answer answer = null; ++ Answer answer; String path = cmd.getVolumePath(); VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); VirtualMachineMO vmMo = null; -- DatastoreMO sourceDsMo = null; ++ 
DatastoreMO sourceDsMo; DatastoreMO destinationDsMo = null; -- ManagedObjectReference morSourceDS = null; -- ManagedObjectReference morDestinationDS = null; -- String vmdkDataStorePath = null; ++ ManagedObjectReference morSourceDS; ++ ManagedObjectReference morDestinationDS; ++ String vmdkDataStorePath; boolean isvVolsInvolved = false; String vmName = null; @@@ -5047,20 -5007,20 +5061,20 @@@ String vmName = cmd.getAttachedVmName(); -- VirtualMachineMO vmMo = null; -- VmwareHypervisorHost srcHyperHost = null; ++ VirtualMachineMO vmMo; ++ VmwareHypervisorHost srcHyperHost; // OfflineVmwareMigration: ifhost is null ??? if (StringUtils.isBlank(cmd.getAttachedVmName())) { return migrateVolume(cmd); } -- ManagedObjectReference morDs = null; -- ManagedObjectReference morDc = null; ++ ManagedObjectReference morDs; ++ ManagedObjectReference morDc; VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec(); -- List<VirtualMachineRelocateSpecDiskLocator> diskLocators = new ArrayList<VirtualMachineRelocateSpecDiskLocator>(); -- VirtualMachineRelocateSpecDiskLocator diskLocator = null; ++ List<VirtualMachineRelocateSpecDiskLocator> diskLocators = new ArrayList<>(); ++ VirtualMachineRelocateSpecDiskLocator diskLocator; -- String tgtDsName = ""; ++ String tgtDsName; try { srcHyperHost = getHyperHost(getServiceContext()); @@@ -5120,9 -5080,9 +5134,9 @@@ // Hence set the existing datastore as target datastore for volumes that are not to be migrated. List<Pair<Integer, ManagedObjectReference>> diskDatastores = vmMo.getAllDiskDatastores(); for (Pair<Integer, ManagedObjectReference> diskDatastore : diskDatastores) { -- if (diskDatastore.first().intValue() != diskId) { ++ if (diskDatastore.first() != diskId) { diskLocator = new VirtualMachineRelocateSpecDiskLocator(); -- diskLocator.setDiskId(diskDatastore.first().intValue()); ++ diskLocator.setDiskId(diskDatastore.first()); diskLocator.setDatastore(diskDatastore.second()); diskLocators.add(diskLocator); } @@@ -5261,8 -5221,8 +5275,8 @@@ DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore); HypervisorHostHelper.createBaseFolder(dsMo, hyperHost, pool.getType()); -- long capacity = 0; -- long available = 0; ++ long capacity; ++ long available; List<ModifyStoragePoolAnswer> childDatastoresModifyStoragePoolAnswers = new ArrayList<>(); if (pool.getType() == StoragePoolType.DatastoreCluster) { StoragepodMO datastoreClusterMo = new StoragepodMO(getServiceContext(), morDatastore); @@@ -5544,7 -5504,7 +5558,7 @@@ * "ManageSnapshotCommand", * "{\"_commandSwitch\":\"-c\",\"_volumePath\":\"i-2-3-KY-ROOT\",\"_snapshotName\":\"i-2-3-KY_i-2-3-KY-ROOT_20101102203827\",\"_snapshotId\":1,\"_vmName\":\"i-2-3-KY\"}" */ -- boolean success = false; ++ boolean success; String cmdSwitch = cmd.getCommandSwitch(); String snapshotOp = "Unsupported snapshot command." 
+ cmdSwitch; if (cmdSwitch.equals(ManageSnapshotCommand.CREATE_SNAPSHOT)) { @@@ -5553,8 -5513,8 +5567,8 @@@ snapshotOp = "destroy"; } -- String details = "ManageSnapshotCommand operation: " + snapshotOp + " Failed for snapshotId: " + snapshotId; -- String snapshotUUID = null; ++ String details; ++ String snapshotUUID; // snapshot operation (create or destroy) is handled inside BackupSnapshotCommand(), we just fake // a success return here @@@ -5612,7 -5572,7 +5626,7 @@@ } protected Answer execute(CreateVolumeFromSnapshotCommand cmd) { -- String details = null; ++ String details; boolean success = false; String newVolumeName = UUID.randomUUID().toString(); @@@ -5660,8 -5620,8 +5674,8 @@@ ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getStorageId()); if (morDs != null) { -- long capacity = 0; -- long free = 0; ++ long capacity; ++ long free; if (cmd.getPooltype() == StoragePoolType.DatastoreCluster) { StoragepodMO datastoreClusterMo = new StoragepodMO(getServiceContext(), morDs); StoragePodSummary summary = datastoreClusterMo.getDatastoreClusterSummary(); @@@ -5795,8 -5755,8 +5809,8 @@@ String details = "Unable to find IP Address of VM. "; String vmName = cmd.getVmName(); boolean result = false; -- String ip = null; -- Answer answer = null; ++ String ip; ++ Answer answer; VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); @@@ -6083,7 -6043,7 +6097,7 @@@ VirtualMachineMO vmMo = new VirtualMachineMO(hyperHost.getContext(), oc.getObj()); if (!template && isWorker) { -- boolean recycle = false; ++ boolean recycle; recycle = mgr.needRecycle(workerTag); if (recycle) { @@@ -6117,7 -6077,7 +6131,7 @@@ @Override public StartupCommand[] initialize() { try { -- String hostApiVersion = "4.1"; ++ String hostApiVersion; VmwareContext context = getServiceContext(); try { VmwareHypervisorHost hyperHost = getHyperHost(context); @@@ -6159,7 -6119,7 +6173,7 @@@ } private List<StartupStorageCommand> initializeLocalStorage() { -- List<StartupStorageCommand> storageCmds = new ArrayList<StartupStorageCommand>(); ++ List<StartupStorageCommand> storageCmds = new ArrayList<>(); VmwareContext context = getServiceContext(); try { @@@ -6213,7 -6173,7 +6227,7 @@@ VmwareContext serviceContext = getServiceContext(); Map<String, String> details = cmd.getHostDetails(); if (details == null) { -- details = new HashMap<String, String>(); ++ details = new HashMap<>(); } try { @@@ -6221,16 -6181,16 +6235,16 @@@ fillHostNetworkInfo(serviceContext, cmd); fillHostDetailsInfo(serviceContext, details); } catch (RuntimeFaultFaultMsg e) { - logger.error("RuntimeFault while retrieving host info: " + e.toString(), e); - s_logger.error("RuntimeFault while retrieving host info: " + e.toString(), e); ++ logger.error("RuntimeFault while retrieving host info: " + e, e); throw new CloudRuntimeException("RuntimeFault while retrieving host info"); } catch (RemoteException e) { - logger.error("RemoteException while retrieving host info: " + e.toString(), e); - s_logger.error("RemoteException while retrieving host info: " + e.toString(), e); ++ logger.error("RemoteException while retrieving host info: " + e, e); invalidateServiceContext(); throw new CloudRuntimeException("RemoteException while retrieving host info"); } catch (Exception e) { - logger.error("Exception while retrieving host info: " + e.toString(), e); - s_logger.error("Exception while retrieving host info: " + e.toString(), e); ++ logger.error("Exception while retrieving host 
info: " + e, e); invalidateServiceContext(); -- throw new CloudRuntimeException("Exception while retrieving host info: " + e.toString()); ++ throw new CloudRuntimeException("Exception while retrieving host info: " + e); } cmd.setHostDetails(details); @@@ -6329,7 -6289,7 +6343,7 @@@ try { return getHostVmStateReport(); } catch (Exception e) { -- return new HashMap<String, HostVmStateReportEntry>(); ++ return new HashMap<>(); } } @@@ -6448,7 -6408,7 +6462,7 @@@ // the internal CS name, but the custom field CLOUD_VM_INTERNAL_NAME always stores the internal CS name. ObjectContent[] ocs = hyperHost.getVmPropertiesOnHyperHost(new String[]{"name", "runtime.powerState", "config.template", instanceNameCustomField}); -- HashMap<String, HostVmStateReportEntry> newStates = new HashMap<String, HostVmStateReportEntry>(); ++ HashMap<String, HostVmStateReportEntry> newStates = new HashMap<>(); if (ocs != null && ocs.length > 0) { for (ObjectContent oc : ocs) { List<DynamicProperty> objProps = oc.getPropSet(); @@@ -6500,7 -6460,7 +6514,7 @@@ // the internal CS name, but the custom field CLOUD_VM_INTERNAL_NAME always stores the internal CS name. ObjectContent[] ocs = hyperHost.getVmPropertiesOnHyperHost(new String[]{"name", "runtime.powerState", "config.template", instanceNameCustomField}); -- HashMap<String, PowerState> newStates = new HashMap<String, PowerState>(); ++ HashMap<String, PowerState> newStates = new HashMap<>(); if (ocs != null && ocs.length > 0) { for (ObjectContent oc : ocs) { List<DynamicProperty> objProps = oc.getPropSet(); @@@ -6541,7 -6501,7 +6555,7 @@@ private HashMap<String, VmStatsEntry> getVmStats(List<String> vmNames) throws Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); -- HashMap<String, VmStatsEntry> vmResponseMap = new HashMap<String, VmStatsEntry>(); ++ HashMap<String, VmStatsEntry> vmResponseMap = new HashMap<>(); ManagedObjectReference perfMgr = getServiceContext().getServiceContent().getPerfManager(); VimPortType service = getServiceContext().getService(); @@@ -6603,7 -6563,7 +6617,7 @@@ for (ObjectContent oc : ocs) { List<DynamicProperty> objProps = oc.getPropSet(); if (objProps != null) { -- String name = null; ++ String name; String numberCPUs = null; double maxCpuUsage = 0; String memlimit = null; @@@ -6654,7 -6614,7 +6668,7 @@@ double diskReadKbs = 0; double diskWriteKbs = 0; -- final ArrayList<PerfMetricId> perfMetricsIds = new ArrayList<PerfMetricId>(); ++ final ArrayList<PerfMetricId> perfMetricsIds = new ArrayList<>(); if (rxPerfCounterInfo != null) { perfMetricsIds.add(VmwareHelper.createPerfMetricId(rxPerfCounterInfo, "")); } @@@ -6781,7 -6741,7 +6795,7 @@@ stats[1] += Long.parseLong(splitResult[i++]); } } catch (Throwable e) { - logger.warn("Unable to parse return from script return of network usage command: " + e.toString(), e); - s_logger.warn("Unable to parse return from script return of network usage command: " + e.toString(), e); ++ logger.warn("Unable to parse return from script return of network usage command: " + e, e); } } return stats; @@@ -6804,7 -6764,7 +6818,7 @@@ sch.connect(addr); return null; } catch (IOException e) { - logger.info("Could not connect to " + ipAddress + " due to " + e.toString()); - s_logger.info("Could not connect to " + ipAddress + " due to " + e.toString()); ++ logger.info("Could not connect to " + ipAddress + " due to " + e); if (e instanceof ConnectException) { // if connection is refused because of VM is being started, // we give it more sleep time @@@ -6945,21 -6905,21 +6959,21 @@@ Integer 
intObj = (Integer) params.get("ports.per.dvportgroup"); if (intObj != null) -- _portsPerDvPortGroup = intObj.intValue(); ++ _portsPerDvPortGroup = intObj; - s_logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " + logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " + _publicTrafficInfo.getVirtualSwitchType() + " : " + _publicTrafficInfo.getVirtualSwitchName() + ", guest traffic over " + _guestTrafficInfo.getVirtualSwitchType() + " : " + _guestTrafficInfo.getVirtualSwitchName()); Boolean boolObj = (Boolean) params.get("vmware.create.full.clone"); -- if (boolObj != null && boolObj.booleanValue()) { ++ if (boolObj != null && boolObj) { _fullCloneFlag = true; } else { _fullCloneFlag = false; } boolObj = (Boolean) params.get("vm.instancename.flag"); -- if (boolObj != null && boolObj.booleanValue()) { ++ if (boolObj != null && boolObj) { _instanceNameFlag = true; } else { _instanceNameFlag = false; @@@ -7024,7 -6984,7 +7038,7 @@@ @Override public VmwareContext getServiceContext(Command cmd) { -- VmwareContext context = null; ++ VmwareContext context; if (s_serviceContext.get() != null) { context = s_serviceContext.get(); String poolKey = VmwareContextPool.composePoolKey(_vCenterAddress, _username); @@@ -7207,10 -7167,10 +7221,10 @@@ if (keyFile == null || !keyFile.exists()) { filePath = s_defaultPathSystemVmKeyFile; keyFile = new File(filePath); - s_logger.debug("Looking for file [" + filePath + "] in the classpath."); + logger.debug("Looking for file [" + filePath + "] in the classpath."); } if (!keyFile.exists()) { - logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); - s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); ++ logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile); } return keyFile; } @@@ -7293,13 -7253,13 +7307,13 @@@ } VmwareHypervisorHost sourceHyperHost = hypervisorHost; VmwareHypervisorHost targetHyperHost = hostInTargetCluster; -- VirtualMachineMO vmMo = null; -- ManagedObjectReference morSourceHostDc = null; ++ VirtualMachineMO vmMo; ++ ManagedObjectReference morSourceHostDc; VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec(); -- List<VirtualMachineRelocateSpecDiskLocator> diskLocators = new ArrayList<VirtualMachineRelocateSpecDiskLocator>(); -- Set<String> mountedDatastoresAtSource = new HashSet<String>(); ++ List<VirtualMachineRelocateSpecDiskLocator> diskLocators = new ArrayList<>(); ++ Set<String> mountedDatastoresAtSource = new HashSet<>(); List<VolumeObjectTO> volumeToList = new ArrayList<>(); -- Map<Long, Integer> volumeDeviceKey = new HashMap<Long, Integer>(); ++ Map<Long, Integer> volumeDeviceKey = new HashMap<>(); try { if (sourceHyperHost == null) { @@@ -7339,7 -7299,7 +7353,7 @@@ if (StringUtils.isNotBlank(poolUuid)) { VmwareHypervisorHost dsHost = targetHyperHost == null ? sourceHyperHost : targetHyperHost; -- ManagedObjectReference morDatastore = null; ++ ManagedObjectReference morDatastore; morDatastore = getTargetDatastoreMOReference(poolUuid, dsHost); if (morDatastore == null) { String msg = String.format("Unable to find the target datastore: %s on host: %s to execute migration", poolUuid, dsHost.getHyperHostName()); @@@ -7388,9 -7348,9 +7402,9 @@@ // Hence set the existing datastore as target datastore for volumes that are not to be migrated. 
List<Pair<Integer, ManagedObjectReference>> diskDatastores = vmMo.getAllDiskDatastores(); for (Pair<Integer, ManagedObjectReference> diskDatastore : diskDatastores) { -- if (!volumeDeviceKey.containsValue(diskDatastore.first().intValue())) { ++ if (!volumeDeviceKey.containsValue(diskDatastore.first())) { VirtualMachineRelocateSpecDiskLocator diskLocator = new VirtualMachineRelocateSpecDiskLocator(); -- diskLocator.setDiskId(diskDatastore.first().intValue()); ++ diskLocator.setDiskId(diskDatastore.first()); diskLocator.setDatastore(diskDatastore.second()); diskLocators.add(diskLocator); } @@@ -7629,7 -7588,7 +7643,7 @@@ VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); -- ManagedObjectReference morDatastore = null; ++ ManagedObjectReference morDatastore; int count = 0; List<String> names = new ArrayList<>(); @@@ -7737,8 -7696,8 +7751,8 @@@ return new Answer(command, true, "success"); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); - return new Answer(command, false, "Unable to execute PrepareForBackupRestorationCommand due to " + e.toString()); + logger.error("Unexpected exception: ", e); - return new Answer(command, false, "Unable to execute PrepareForBackupRestorationCommand due to " + e.toString()); ++ return new Answer(command, false, "Unable to execute PrepareForBackupRestorationCommand due to " + e); } } diff --cc plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java index 87dad560f29,3b384831518..574e5ddcfea --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java @@@ -1511,12 -1512,59 +1514,59 @@@ public class LinstorPrimaryDataStoreDri @Override public Pair<Long, Long> getStorageStats(StoragePool storagePool) { - return null; - s_logger.debug(String.format("Requesting storage stats: %s", storagePool)); ++ logger.debug(String.format("Requesting storage stats: %s", storagePool)); + return LinstorUtil.getStorageStats(storagePool.getHostAddress(), getRscGrp(storagePool)); } @Override public boolean canProvideVolumeStats() { - return false; + return LinstorConfigurationManager.VolumeStatsCacheTime.value() > 0; + } + + /** + * Updates the cache map containing current allocated size data. 
+ * @param api Linstor Developers api object + */ + private void fillVolumeStatsCache(DevelopersApi api) { + try { - s_logger.trace("Start volume stats cache update"); ++ logger.trace("Start volume stats cache update"); + List<ResourceWithVolumes> resources = api.viewResources( + Collections.emptyList(), + Collections.emptyList(), + Collections.emptyList(), + null, + null, + null); + + List<ResourceDefinition> rscDfns = api.resourceDefinitionList( + Collections.emptyList(), true, null, null, null); + + HashMap<String, Long> resSizeMap = new HashMap<>(); + for (ResourceDefinition rscDfn : rscDfns) { + if (CollectionUtils.isNotEmpty(rscDfn.getVolumeDefinitions())) { + resSizeMap.put(rscDfn.getName(), rscDfn.getVolumeDefinitions().get(0).getSizeKib() * 1024); + } + } + + HashMap<String, Long> allocSizeMap = new HashMap<>(); + for (ResourceWithVolumes rsc : resources) { + if (!LinstorUtil.isRscDiskless(rsc) && !rsc.getVolumes().isEmpty()) { + long allocatedBytes = allocSizeMap.getOrDefault(rsc.getName(), 0L); + allocSizeMap.put(rsc.getName(), Math.max(allocatedBytes, rsc.getVolumes().get(0).getAllocatedSizeKib() * 1024)); + } + } + + volumeStats.clear(); + for (Map.Entry<String, Long> entry : allocSizeMap.entrySet()) { + Long reserved = resSizeMap.getOrDefault(entry.getKey(), 0L); + Pair<Long, Long> volStat = new Pair<>(entry.getValue(), reserved); + volumeStats.put(entry.getKey(), volStat); + } + volumeStatsLastUpdate = System.currentTimeMillis(); - s_logger.trace("Done volume stats cache update: " + volumeStats.size()); ++ logger.trace("Done volume stats cache update: {}", volumeStats.size()); + } catch (ApiException e) { - s_logger.error("Unable to fetch Linstor resources: " + e.getBestMessage()); ++ logger.error("Unable to fetch Linstor resources: {}", e.getBestMessage()); + } } @Override diff --cc plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java index 87b31d70554,60d06590006..40aca2fdd45 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java @@@ -196,6 -195,30 +196,30 @@@ public class LinstorUtil } } + public static Pair<Long, Long> getStorageStats(String linstorUrl, String rscGroupName) { + DevelopersApi linstorApi = getLinstorAPI(linstorUrl); + try { + List<StoragePool> storagePools = LinstorUtil.getRscGroupStoragePools(linstorApi, rscGroupName); + + long capacity = storagePools.stream() + .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS) + .mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0L) + .sum() * 1024; // linstor uses kiB + + long used = storagePools.stream() + .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS) + .mapToLong(sp -> sp.getTotalCapacity() != null && sp.getFreeCapacity() != null ? + sp.getTotalCapacity() - sp.getFreeCapacity() : 0L) + .sum() * 1024; // linstor uses Kib - s_logger.debug( ++ LOGGER.debug( + String.format("Linstor(%s;%s): storageStats -> %d/%d", linstorUrl, rscGroupName, capacity, used)); + return new Pair<>(capacity, used); + } catch (ApiException apiEx) { - s_logger.error(apiEx.getMessage()); ++ LOGGER.error(apiEx.getMessage()); + throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); + } + } + /** * Check if any resource of the given name is InUse on any host. 
* diff --cc server/src/main/java/com/cloud/alert/AlertManagerImpl.java index db2ed4927d5,3fae6b453f2..f35a0664a85 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@@ -88,41 -83,54 +89,55 @@@ import com.cloud.utils.Pair import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.SearchCriteria; + import org.jetbrains.annotations.Nullable; public class AlertManagerImpl extends ManagerBase implements AlertManager, Configurable { + protected Logger logger = LogManager.getLogger(AlertManagerImpl.class.getName()); + + public static final List<AlertType> ALERTS = Arrays.asList(AlertType.ALERT_TYPE_HOST + , AlertType.ALERT_TYPE_USERVM + , AlertType.ALERT_TYPE_DOMAIN_ROUTER + , AlertType.ALERT_TYPE_CONSOLE_PROXY + , AlertType.ALERT_TYPE_SSVM + , AlertType.ALERT_TYPE_STORAGE_MISC + , AlertType.ALERT_TYPE_MANAGEMENT_NODE + , AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED + , AlertType.ALERT_TYPE_UPLOAD_FAILED + , AlertType.ALERT_TYPE_OOBM_AUTH_ERROR + , AlertType.ALERT_TYPE_HA_ACTION + , AlertType.ALERT_TYPE_CA_CERT); - protected Logger logger = Logger.getLogger(AlertManagerImpl.class.getName()); + private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // Thirty seconds expressed in milliseconds. private static final DecimalFormat DfPct = new DecimalFormat("###.##"); private static final DecimalFormat DfWhole = new DecimalFormat("########"); @Inject -- private AlertDao _alertDao; ++ AlertDao _alertDao; @Inject protected StorageManager _storageMgr; @Inject protected CapacityManager _capacityMgr; @Inject -- private CapacityDao _capacityDao; ++ CapacityDao _capacityDao; @Inject -- private DataCenterDao _dcDao; ++ DataCenterDao _dcDao; @Inject -- private HostPodDao _podDao; ++ HostPodDao _podDao; @Inject -- private ClusterDao _clusterDao; ++ ClusterDao _clusterDao; @Inject -- private IPAddressDao _publicIPAddressDao; ++ IPAddressDao _publicIPAddressDao; @Inject -- private DataCenterIpAddressDao _privateIPAddressDao; ++ DataCenterIpAddressDao _privateIPAddressDao; @Inject -- private PrimaryDataStoreDao _storagePoolDao; ++ PrimaryDataStoreDao _storagePoolDao; @Inject -- private ConfigurationDao _configDao; ++ ConfigurationDao _configDao; @Inject -- private ResourceManager _resourceMgr; ++ ResourceManager _resourceMgr; @Inject -- private ConfigurationManager _configMgr; ++ ConfigurationManager _configMgr; @Inject protected ConfigDepot _configDepot; @Inject @@@ -138,7 -146,7 +153,7 @@@ private double _vlanCapacityThreshold = 0.75; private double _directNetworkPublicIpCapacityThreshold = 0.75; private double _localStorageCapacityThreshold = 0.75; -- Map<Short, Double> _capacityTypeThresholdMap = new HashMap<Short, Double>(); ++ Map<Short, Double> _capacityTypeThresholdMap = new HashMap<>(); private final ExecutorService _executor; @@@ -402,18 -372,18 +417,15 @@@ private void createOrUpdateVlanCapacity(long dcId, AllocationState capacityState) { SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria(); -- -- List<CapacityVO> capacities = _capacityDao.search(capacitySC, null); -- capacitySC = _capacityDao.createSearchCriteria(); capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, dcId); capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_VLAN); -- capacities = _capacityDao.search(capacitySC, null); ++ List<CapacityVO> capacities = _capacityDao.search(capacitySC, null); int totalVlans = 
_dcDao.countZoneVlans(dcId, false); int allocatedVlans = _dcDao.countZoneVlans(dcId, true); CapacityState vlanCapacityState = (capacityState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled; -- if (capacities.size() == 0) { ++ if (capacities.isEmpty()) { CapacityVO newVlanCapacity = new CapacityVO(null, dcId, null, null, allocatedVlans, totalVlans, Capacity.CAPACITY_TYPE_VLAN); newVlanCapacity.setCapacityState(vlanCapacityState); _capacityDao.persist(newVlanCapacity); @@@ -430,16 -400,16 +442,13 @@@ public void createOrUpdateIpCapacity(Long dcId, Long podId, short capacityType, AllocationState capacityState) { SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria(); -- -- List<CapacityVO> capacities = _capacityDao.search(capacitySC, null); -- capacitySC = _capacityDao.createSearchCriteria(); capacitySC.addAnd("podId", SearchCriteria.Op.EQ, podId); capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, dcId); capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType); int totalIPs; int allocatedIPs; -- capacities = _capacityDao.search(capacitySC, null); ++ List<CapacityVO> capacities = _capacityDao.search(capacitySC, null); if (capacityType == Capacity.CAPACITY_TYPE_PRIVATE_IP) { totalIPs = _privateIPAddressDao.countIPs(podId, dcId, false); allocatedIPs = _privateIPAddressDao.countIPs(podId, dcId, true); @@@ -452,7 -422,7 +461,7 @@@ } CapacityState ipCapacityState = (capacityState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled; -- if (capacities.size() == 0) { ++ if (capacities.isEmpty()) { CapacityVO newPublicIPCapacity = new CapacityVO(null, dcId, podId, null, allocatedIPs, totalIPs, capacityType); newPublicIPCapacity.setCapacityState(ipCapacityState); _capacityDao.persist(newPublicIPCapacity); @@@ -477,7 -447,7 +486,7 @@@ int total = usedTotal.second(); int allocated = usedTotal.first(); CapacityState state = (capacityState == AllocationState.Disabled) ? 
CapacityState.Disabled : CapacityState.Enabled; -- if (capacities.size() == 0) { ++ if (capacities.isEmpty()) { CapacityVO capacityVO = new CapacityVO(null, dcId, null, null, allocated, total, capacityType); capacityVO.setCapacityState(state); _capacityDao.persist(capacityVO); @@@ -524,13 -494,13 +533,12 @@@ // Generate Alerts for Zone Level capacities for (DataCenterVO dc : dataCenterList) { for (Short capacityType : dataCenterCapacityTypes) { -- List<SummedCapacity> capacity = new ArrayList<SummedCapacity>(); -- capacity = _capacityDao.findCapacityBy(capacityType.intValue(), dc.getId(), null, null); ++ List<SummedCapacity> capacity = _capacityDao.findCapacityBy(capacityType.intValue(), dc.getId(), null, null); if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE) { capacity.add(getUsedStats(capacityType, dc.getId(), null, null)); } -- if (capacity == null || capacity.size() == 0) { ++ if (capacity == null || capacity.isEmpty()) { continue; } double totalCapacity = capacity.get(0).getTotalCapacity(); @@@ -545,7 -515,7 +553,7 @@@ for (HostPodVO pod : podList) { for (Short capacityType : podCapacityTypes) { List<SummedCapacity> capacity = _capacityDao.findCapacityBy(capacityType.intValue(), pod.getDataCenterId(), pod.getId(), null); -- if (capacity == null || capacity.size() == 0) { ++ if (capacity == null || capacity.isEmpty()) { continue; } double totalCapacity = capacity.get(0).getTotalCapacity(); @@@ -559,11 -529,11 +567,10 @@@ // Generate Alerts for Cluster Level capacities for (ClusterVO cluster : clusterList) { for (Short capacityType : clusterCapacityTypes) { -- List<SummedCapacity> capacity = new ArrayList<SummedCapacity>(); -- capacity = _capacityDao.findCapacityBy(capacityType.intValue(), cluster.getDataCenterId(), null, cluster.getId()); ++ List<SummedCapacity> capacity = _capacityDao.findCapacityBy(capacityType.intValue(), cluster.getDataCenterId(), null, cluster.getId()); // cpu and memory allocated capacity notification threshold can be defined at cluster level, so getting the value if they are defined at cluster level -- double threshold = 0; ++ double threshold; switch (capacityType) { case Capacity.CAPACITY_TYPE_STORAGE: capacity.add(getUsedStats(capacityType, cluster.getDataCenterId(), cluster.getPodId(), cluster.getId())); @@@ -581,7 -551,7 +588,7 @@@ default: threshold = _capacityTypeThresholdMap.get(capacityType); } -- if (capacity == null || capacity.size() == 0) { ++ if (capacity == null || capacity.isEmpty()) { continue; } @@@ -697,7 -693,7 +704,7 @@@ private List<Short> getCapacityTypesAtZoneLevel() { -- List<Short> dataCenterCapacityTypes = new ArrayList<Short>(); ++ List<Short> dataCenterCapacityTypes = new ArrayList<>(); dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP); dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP); dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_SECONDARY_STORAGE); @@@ -709,7 -705,7 +716,7 @@@ private List<Short> getCapacityTypesAtPodLevel() { -- List<Short> podCapacityTypes = new ArrayList<Short>(); ++ List<Short> podCapacityTypes = new ArrayList<>(); podCapacityTypes.add(Capacity.CAPACITY_TYPE_PRIVATE_IP); return podCapacityTypes; @@@ -717,7 -713,7 +724,7 @@@ private List<Short> getCapacityTypesAtClusterLevel() { -- List<Short> clusterCapacityTypes = new ArrayList<Short>(); ++ List<Short> clusterCapacityTypes = new ArrayList<>(); clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_CPU); clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_MEMORY); 
clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_STORAGE); @@@ -740,27 -736,8 +747,19 @@@ public void sendAlert(AlertType alertType, long dataCenterId, Long podId, Long clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException { - logger.warn(String.format("alertType=[%s] dataCenterId=[%s] podId=[%s] clusterId=[%s] message=[%s].", alertType, dataCenterId, podId, clusterId, subject)); - AlertVO alert = getAlertForTrivialAlertType(alertType, dataCenterId, podId, clusterId); + DataCenterVO zone = _dcDao.findById(dataCenterId); + HostPodVO pod = podId == null ? null : _podDao.findById(podId); + ClusterVO cluster = clusterId == null ? null : _clusterDao.findById(clusterId); + sendAlert(alertType, zone, pod, cluster, subject, content); + } + + public void sendAlert(AlertType alertType, DataCenter dataCenter, Pod pod, Cluster cluster, String subject, String content) + throws MessagingException, UnsupportedEncodingException { - logger.warn(String.format("alertType=[%s] dataCenter=[%s] pod=[%s] cluster=[%s] message=[%s].", alertType, dataCenter, pod, cluster, subject)); - AlertVO alert = null; + Long clusterId = cluster == null ? null : cluster.getId(); + Long podId = pod == null ? null : pod.getId(); + long dcId = dataCenter == null ? 0L : dataCenter.getId(); - if ((alertType != AlertManager.AlertType.ALERT_TYPE_HOST) && (alertType != AlertManager.AlertType.ALERT_TYPE_USERVM) - && (alertType != AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER) && (alertType != AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY) - && (alertType != AlertManager.AlertType.ALERT_TYPE_SSVM) && (alertType != AlertManager.AlertType.ALERT_TYPE_STORAGE_MISC) - && (alertType != AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE) && (alertType != AlertManager.AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED) - && (alertType != AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED) && (alertType != AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR) - && (alertType != AlertManager.AlertType.ALERT_TYPE_HA_ACTION) && (alertType != AlertManager.AlertType.ALERT_TYPE_CA_CERT)) { - alert = _alertDao.getLastAlert(alertType.getType(), dcId, podId, clusterId); - } ++ logger.warn(String.format("alertType=[%s] dataCenterId=[%s] podId=[%s] clusterId=[%s] message=[%s].", alertType, dcId, podId, clusterId, subject)); ++ AlertVO alert = getAlertForTrivialAlertType(alertType, dcId, podId, clusterId); if (alert == null) { AlertVO newAlert = new AlertVO();
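
Note on the sendAlert() hunk above: the merge drops a chain of twelve "alertType != ..." comparisons that guarded the _alertDao.getLastAlert(...) lookup and instead relies on the new ALERTS list declared at the top of AlertManagerImpl plus a getAlertForTrivialAlertType(...) helper whose body is outside this diff. A minimal sketch of that pattern follows; the AlertType values and method bodies are simplified stand-ins, and the helper's behavior is an assumption inferred from the removed condition chain, not the actual CloudStack implementation.

    // Sketch only: the de-duplication pattern behind the merged sendAlert().
    // AlertType here is a simplified stand-in; getAlertForTrivialAlertType is
    // real but its body is outside this diff, so the behavior below is an
    // assumption inferred from the removed "alertType != ..." chain.
    import java.util.Arrays;
    import java.util.List;

    class AlertDedupSketch {
        enum AlertType { HOST, USERVM, SSVM, CAPACITY_CPU }   // stand-in values

        // Counterpart of the new ALERTS constant: types that always raise a fresh alert.
        static final List<AlertType> ALWAYS_SEND = Arrays.asList(
                AlertType.HOST, AlertType.USERVM, AlertType.SSVM);

        /** Only types outside ALWAYS_SEND consult the previous alert (de-duplication). */
        static String getAlertForTrivialAlertType(AlertType type, long dcId) {
            if (ALWAYS_SEND.contains(type)) {
                return null;                        // no lookup: send every occurrence
            }
            return findLastAlert(type, dcId);       // stand-in for _alertDao.getLastAlert(...)
        }

        private static String findLastAlert(AlertType type, long dcId) {
            return null;                            // placeholder for the DAO query
        }
    }

Folding the comparisons into a list keeps the "always send" set in one place, so adding an alert type only requires an entry in ALERTS rather than another clause in every caller.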
