This is an automated email from the ASF dual-hosted git repository.
dahn pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
The following commit(s) were added to refs/heads/main by this push:
new f090c77f419 misc: fix spelling (#7549)
f090c77f419 is described below
commit f090c77f419d68dcb9ff58d0815e254b1348d739
Author: John Bampton <[email protected]>
AuthorDate: Thu Nov 2 18:23:53 2023 +1000
misc: fix spelling (#7549)
Co-authored-by: Stephan Krug <[email protected]>
---
.../cloudstack/api/command/admin/host/DeleteHostCmd.java | 4 ++--
.../user/loadbalancer/AssignToLoadBalancerRuleCmd.java | 2 +-
.../java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java | 2 +-
.../java/com/cloud/upgrade/DatabaseIntegrityChecker.java | 2 +-
.../src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java | 2 +-
.../src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java | 4 ++--
.../src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java | 2 +-
.../schema/src/main/resources/META-INF/db/schema-21to22.sql | 4 ++--
.../src/main/resources/META-INF/db/schema-2212to2213.sql | 2 +-
.../src/main/resources/META-INF/db/schema-2214to30.sql | 2 +-
.../src/main/resources/META-INF/db/schema-225to226.sql | 2 +-
.../src/main/resources/META-INF/db/schema-227to228.sql | 2 +-
.../src/main/resources/META-INF/db/schema-228to229.sql | 2 +-
.../src/main/resources/META-INF/db/schema-229to2210.sql | 2 +-
.../schema/src/main/resources/META-INF/db/schema-302to40.sql | 2 +-
.../src/main/resources/META-INF/db/schema-307to410.sql | 2 +-
.../src/main/resources/META-INF/db/schema-410to420.sql | 8 ++++----
.../src/main/resources/META-INF/db/schema-41310to41400.sql | 2 +-
.../src/main/resources/META-INF/db/schema-41720to41800.sql | 2 +-
.../src/main/resources/META-INF/db/schema-420to421.sql | 2 +-
.../cloudstack/storage/snapshot/DefaultSnapshotStrategy.java | 2 +-
.../storage/datastore/ObjectInDataStoreManagerImpl.java | 2 +-
.../cloudstack/storage/endpoint/DefaultEndPointSelector.java | 2 +-
.../storage/image/db/TemplateDataStoreDaoImpl.java | 8 ++++----
.../java/org/apache/cloudstack/storage/BaseTypeTest.java | 4 ++--
.../DotNet/ServerResource/HypervResource/CloudStackTypes.cs | 2 +-
.../ServerResource.Tests/HypervResourceController1Test.cs | 10 +++++-----
.../ServerResource.Tests/HypervResourceControllerTest.cs | 10 +++++-----
.../com/cloud/hypervisor/kvm/resource/KVMGuestOsMapper.java | 2 +-
.../src/main/scripts/vm/hypervisor/ovm/OvmVolumeModule.py | 2 +-
.../hypervisor/xenserver/resource/CitrixResourceBase.java | 12 ++++++------
.../xenserver/resource/XenServerStorageProcessor.java | 2 +-
.../cloudstack/storage/datastore/util/ElastistorUtil.java | 2 +-
.../apache/cloudstack/storage/datastore/util/DateraUtil.java | 2 +-
.../driver/CloudStackPrimaryDataStoreDriverImpl.java | 2 +-
scripts/vm/hypervisor/xenserver/cloud-plugin-storage | 4 ++--
scripts/vm/hypervisor/xenserver/vmops | 4 ++--
scripts/vm/hypervisor/xenserver/vmopsSnapshot | 6 +++---
server/src/main/java/com/cloud/configuration/Config.java | 2 +-
.../src/main/java/com/cloud/network/NetworkServiceImpl.java | 2 +-
.../network/router/VirtualNetworkApplianceManagerImpl.java | 2 +-
.../main/java/com/cloud/network/rules/RulesManagerImpl.java | 2 +-
.../com/cloud/network/vpc/NetworkACLServiceImplTest.java | 6 +++---
setup/db/create-schema.sql | 2 +-
setup/db/templates.sql | 4 ++--
test/integration/component/test_browse_volumes.py | 6 +++---
test/integration/component/test_vpc_vm_life_cycle.py | 6 +++---
usage/src/test/resources/cloud1.xml | 2 +-
usage/src/test/resources/cloud2.xml | 2 +-
utils/src/test/java/com/cloud/utils/net/Ip4AddressTest.java | 4 ++--
utils/src/test/java/com/cloud/utils/net/IpTest.java | 4 ++--
51 files changed, 87 insertions(+), 87 deletions(-)
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeleteHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeleteHostCmd.java
index 609d189b0ca..934965cd24c 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeleteHostCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeleteHostCmd.java
@@ -64,7 +64,7 @@ public class DeleteHostCmd extends BaseCmd {
return (forced != null) ? forced : false;
}
- public boolean isForceDestoryLocalStorage() {
+ public boolean isForceDestroyLocalStorage() {
return (forceDestroyLocalStorage != null) ? forceDestroyLocalStorage :
true;
}
@@ -79,7 +79,7 @@ public class DeleteHostCmd extends BaseCmd {
@Override
public void execute() {
- boolean result = _resourceService.deleteHost(getId(), isForced(), isForceDestoryLocalStorage());
+ boolean result = _resourceService.deleteHost(getId(), isForced(), isForceDestroyLocalStorage());
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
this.setResponseObject(response);
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java
index 7647b017f29..c245ab236fe 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java
@@ -133,7 +133,7 @@ public class AssignToLoadBalancerRuleCmd extends
BaseAsyncCmd {
throw new InvalidParameterValueException("Unable to find
virtual machine ID: " + vmId);
}
- //check wether the given ip is valid ip or not
+ //check whether the given ip is valid ip or not
if (vmIp == null || !NetUtils.isValidIp4(vmIp)) {
throw new InvalidParameterValueException("Invalid ip
address "+ vmIp +" passed in vmidipmap for " +
"vmid " + vmId);
diff --git a/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java
index a35e7914895..823ea36b97f 100644
--- a/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java
@@ -228,7 +228,7 @@ public class NetworkOfferingDaoImpl extends
GenericDaoBase<NetworkOfferingVO, Lo
}
/**
- * Persist L2 deafult Network offering
+ * Persist L2 default Network offering
*/
private void persistL2DefaultNetworkOffering(String name, String
displayText, boolean specifyVlan, boolean configDriveEnabled) {
NetworkOfferingVO offering = new NetworkOfferingVO(name, displayText,
TrafficType.Guest, false, specifyVlan,
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseIntegrityChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseIntegrityChecker.java
index bb75aacf2b6..1fc8b7e3d84 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseIntegrityChecker.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseIntegrityChecker.java
@@ -86,7 +86,7 @@ public class DatabaseIntegrityChecker extends AdapterBase
implements SystemInteg
boolean noDuplicate = true;
StringBuffer helpInfo = new StringBuffer();
String note =
- "DATABASE INTEGRITY ERROR\nManagement server detected
there are some hosts connect to the same loacal storage, please contact
CloudStack support team for solution. Below are detialed info, please attach
all of them to CloudStack support. Thank you\n";
+ "DATABASE INTEGRITY ERROR\nManagement server detected
there are some hosts connect to the same local storage, please contact
CloudStack support team for solution. Below are detailed info, please attach
all of them to CloudStack support. Thank you\n";
helpInfo.append(note);
while (rs.next()) {
try ( PreparedStatement sel_pstmt =
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java
index c98ba48a3f6..bc58794e8bd 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java
@@ -1168,7 +1168,7 @@ public class Upgrade218to22 implements DbUpgrade {
pstmt.executeUpdate();
s_logger.debug("Upgraded userStatistcis with
device_type=DomainRouter");
- // update device_id infrormation
+ // update device_id information
try (
PreparedStatement selectUserStatistics =
conn.prepareStatement("SELECT id, account_id, data_center_id FROM
user_statistics");
ResultSet rs = selectUserStatistics.executeQuery();
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java
index 1e23377c012..48908f5c7d5 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java
@@ -332,7 +332,7 @@ public class Upgrade224to225 implements DbUpgrade {
pstmt.close();
} catch (SQLException e) {
s_logger.error("Unable to add missing foreign key; following
statement was executed:" + pstmt);
- throw new CloudRuntimeException("Unable to add missign keys due to exception", e);
+ throw new CloudRuntimeException("Unable to add missing keys due to exception", e);
}
}
@@ -348,7 +348,7 @@ public class Upgrade224to225 implements DbUpgrade {
}
} catch (SQLException e) {
s_logger.error("Unable to add missing ovs tunnel account due to ",
e);
- throw new CloudRuntimeException("Unable to add missign ovs tunnel account due to ", e);
+ throw new CloudRuntimeException("Unable to add missing ovs tunnel account due to ", e);
}
}
}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java
index cafd025982a..ba479b52f89 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java
@@ -66,7 +66,7 @@ public class Upgrade301to302 extends LegacyDbUpgrade {
keys.add("i_host__allocation_state");
uniqueKeys.put("host", keys);
- s_logger.debug("Droping i_host__allocation_state key in host table");
+ s_logger.debug("Dropping i_host__allocation_state key in host table");
for (String tableName : uniqueKeys.keySet()) {
DbUpgradeUtils.dropKeysIfExist(conn, tableName,
uniqueKeys.get(tableName), false);
}
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-21to22.sql b/engine/schema/src/main/resources/META-INF/db/schema-21to22.sql
index 9502ea5aa56..eb473cfc7f6 100755
--- a/engine/schema/src/main/resources/META-INF/db/schema-21to22.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-21to22.sql
@@ -646,7 +646,7 @@ INSERT INTO `cloud`.`guest_os` (id, category_id,
display_name) VALUES (88, 6, 'W
INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (89, 6,
'Windows Server 2003 Standard Edition(32-bit)');
INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (90, 6,
'Windows Server 2003 Standard Edition(64-bit)');
INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (91, 6,
'Windows Server 2003 Web Edition');
-INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (92, 6, 'Microsoft Small Bussiness Server 2003');
+INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (92, 6, 'Microsoft Small Business Server 2003');
INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (93, 6,
'Windows XP (32-bit)');
INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (94, 6,
'Windows XP (64-bit)');
INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (95, 6,
'Windows 2000 Advanced Server');
@@ -779,7 +779,7 @@ INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type,
guest_os_name, guest
INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name,
guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Standard Edition
(32-bit)', 89);
INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name,
guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Standard Edition
(64-bit)', 90);
INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name,
guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Web Edition',
91);
-INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Small Bussiness Server 2003', 92);
+INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Small Business Server 2003', 92);
INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name,
guest_os_id) VALUES ("VmWare", 'Microsoft Windows Vista (32-bit)', 56);
INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name,
guest_os_id) VALUES ("VmWare", 'Microsoft Windows Vista (64-bit)', 101);
INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name,
guest_os_id) VALUES ("VmWare", 'Microsoft Windows XP Professional (32-bit)',
93);
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2212to2213.sql b/engine/schema/src/main/resources/META-INF/db/schema-2212to2213.sql
index a94f60c7381..2e86599f792 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-2212to2213.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-2212to2213.sql
@@ -77,6 +77,6 @@ update host_details set name='memory' where host_id in
(select id from host wher
update host_details set name='privateip' where host_id in (select id from host
where hypervisor_type='BareMetal') and name='agentIp';
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT',
'management-server', 'vmware.root.disk.controller', 'ide', 'Specify the default
disk controller for root volumes, valid values are scsi, ide');
-INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.destory.forcestop', 'false', 'On destory, force-stop takes this value');
+INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.destroy.forcestop', 'false', 'On destroy, force-stop takes this value');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT',
'management-server', 'network.lock.timeout', '600', 'Lock wait timeout
(seconds) while implementing network');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT',
'management-server', 'network.disable.rpfilter','true','disable rp_filter on
Domain Router VM public interfaces.');
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql b/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql
index d77ff169145..22fda616649 100755
--- a/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql
@@ -702,7 +702,7 @@ UPDATE `cloud`.`configuration` SET category = 'Usage' where
name in ('usage.exec
ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD CONSTRAINT
`fk_op_dc_vnet_alloc__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES
`data_center`(`id`) ON DELETE CASCADE;
ALTER TABLE `cloud`.`domain` ADD COLUMN `type` varchar(255) NOT NULL DEFAULT
'Normal' COMMENT 'type of the domain - can be Normal or Project';
-UPDATE `cloud`.`configuration` SET name='vm.destroy.forcestop' where name='vm.destory.forcestop';
+UPDATE `cloud`.`configuration` SET name='vm.destroy.forcestop' where name='vm.destroy.forcestop';
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT',
'management-server', 'vm.destroy.forcestop', 'false', 'On destroy, force-stop
takes this value');
DELETE FROM `cloud`.`configuration` where name='skip.steps';
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-225to226.sql b/engine/schema/src/main/resources/META-INF/db/schema-225to226.sql
index 49f948c7b29..ec1baae2e69 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-225to226.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-225to226.sql
@@ -31,7 +31,7 @@ CREATE TABLE IF NOT EXISTS `cloud`.`keystore` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(64) NOT NULL COMMENT 'unique name for the certifiation',
`certificate` text NOT NULL COMMENT 'the actual certificate being stored in
the db',
- `key` text NOT NULL COMMENT 'private key associated wih the certificate',
+ `key` text NOT NULL COMMENT 'private key associated with the certificate',
`domain_suffix` varchar(256) NOT NULL COMMENT 'DNS domain suffix associated
with the certificate',
PRIMARY KEY (`id`),
UNIQUE(name)
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql b/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql
index 6828bd1abe6..343c7663fd2 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql
@@ -32,7 +32,7 @@ CREATE TABLE IF NOT EXISTS `cloud`.`keystore` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(64) NOT NULL COMMENT 'unique name for the certifiation',
`certificate` text NOT NULL COMMENT 'the actual certificate being stored in
the db',
- `key` text NOT NULL COMMENT 'private key associated wih the certificate',
+ `key` text NOT NULL COMMENT 'private key associated with the certificate',
`domain_suffix` varchar(256) NOT NULL COMMENT 'DNS domain suffix associated
with the certificate',
PRIMARY KEY (`id`),
UNIQUE(name)
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-228to229.sql b/engine/schema/src/main/resources/META-INF/db/schema-228to229.sql
index edc46c0e46b..9d5baa4c403 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-228to229.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-228to229.sql
@@ -68,7 +68,7 @@ ALTER TABLE `cloud`.`port_forwarding_rules` ADD CONSTRAINT
`fk_port_forwarding_r
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT',
'management-server', 'agent.load.threshold', '0.70', 'Percentage (as a value
between 0 and 1) of connected agents after which agent load balancing will
start happening');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT',
'management-server', 'network.loadbalancer.haproxy.stats.visibility', 'global',
'Load Balancer(haproxy) stats visibility, it can take the following four
parameters : global,guest-network,link-local,disabled');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT',
'management-server',
'network.loadbalancer.haproxy.stats.uri','/admin?stats','Load Balancer(haproxy)
uri.');
-INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authetication string in the format username:password');
+INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authentication string in the format username:password');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT',
'management-server', 'network.loadbalancer.haproxy.stats.port','8081','Load
Balancer(haproxy) stats port number.');
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT',
'NetworkManager', 'use.external.dns', 'false', 'Bypass the cloudstack DHCP/DNS
server vm name service, use zone external dns1 and dns2');
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT',
'management-server', 'network.loadbalancer.basiczone.elb.enabled', 'false',
'Whether the load balancing service is enabled for basic zones');
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-229to2210.sql b/engine/schema/src/main/resources/META-INF/db/schema-229to2210.sql
index d549e680370..9c5c46242af 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-229to2210.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-229to2210.sql
@@ -50,7 +50,7 @@ INSERT IGNORE INTO `cloud`.`configuration` (category,
instance, name, value, des
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT',
'management-server', 'agent.load.threshold', '0.70', 'Percentage (as a value
between 0 and 1) of connected agents after which agent load balancing will
start happening');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT',
'management-server', 'network.loadbalancer.haproxy.stats.visibility', 'global',
'Load Balancer(haproxy) stats visibility, it can take the following four
parameters : global,guest-network,link-local,disabled');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT',
'management-server',
'network.loadbalancer.haproxy.stats.uri','/admin?stats','Load Balancer(haproxy)
uri.');
-INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authetication string in the format username:password');
+INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authentication string in the format username:password');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT',
'management-server', 'network.loadbalancer.haproxy.stats.port','8081','Load
Balancer(haproxy) stats port number.');
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT',
'NetworkManager', 'use.external.dns', 'false', 'Bypass the cloudstack DHCP/DNS
server vm name service, use zone external dns1 and dns2');
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT',
'management-server', 'network.loadbalancer.basiczone.elb.enabled', 'false',
'Whether the load balancing service is enabled for basic zones');
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql b/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql
index e632fa679e5..ca99f0106d2 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql
@@ -227,7 +227,7 @@ CREATE TABLE `cloud`.`nicira_nvp_nic_map` (
-- rrq 5839
-- Remove the unique constraint on physical_network_id, provider_name from
physical_network_service_providers
--- Because the name of this contraint is not set we need this roundabout way
+-- Because the name of this constraint is not set we need this roundabout way
-- The key is also used by the foreign key constraint so drop and recreate
that one
ALTER TABLE `cloud`.`physical_network_service_providers` DROP FOREIGN KEY
fk_pnetwork_service_providers__physical_network_id;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql b/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql
index e5387853b3c..944d910fec4 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql
@@ -51,7 +51,7 @@ ALTER TABLE `storage_pool` ADD `user_info` VARCHAR( 255 )
NULL COMMENT 'Authoriz
INSERT INTO `cloud`.`configuration` (`category`, `instance`, `component`,
`name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT',
'management-server', 'event.purge.interval', '86400', 'The interval (in
seconds) to wait before running the event purge thread');
-- rrq 5839
-- Remove the unique constraint on physical_network_id, provider_name from
physical_network_service_providers
--- Because the name of this contraint is not set we need this roundabout way
+-- Because the name of this constraint is not set we need this roundabout way
-- The key is also used by the foreign key constraint so drop and recreate
that one
ALTER TABLE physical_network_service_providers DROP FOREIGN KEY
fk_pnetwork_service_providers__physical_network_id;
SET @constraintname = (select CONCAT(CONCAT('DROP INDEX ', A.CONSTRAINT_NAME),
' ON physical_network_service_providers' )
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
index 59b6c58d97a..3556e7e1b4a 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
@@ -154,7 +154,7 @@ CREATE VIEW `cloud`.`image_store_view` AS
`cloud`.`image_store_details` ON image_store_details.store_id =
image_store.id;
--- here we have to allow null for store_id to accomodate baremetal case to search for ready templates since template state is only stored in this table
+-- here we have to allow null for store_id to accommodate baremetal case to search for ready templates since template state is only stored in this table
-- FK also commented out due to this
CREATE TABLE `cloud`.`template_store_ref` (
`id` bigint unsigned NOT NULL auto_increment,
@@ -525,9 +525,9 @@ CREATE VIEW `cloud`.`event_view` AS
left join
`cloud`.`event` eve ON event.start_id = eve.id;
-ALTER TABLE `cloud`.`region` ADD COLUMN `portableip_service_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'Is Portable IP service enalbed in the Region';
+ALTER TABLE `cloud`.`region` ADD COLUMN `portableip_service_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'Is Portable IP service enabled in the Region';
-ALTER TABLE `cloud`.`region` ADD COLUMN `gslb_service_enabled` tinyint(1) unsigned NOT NULL DEFAULT 1 COMMENT 'Is GSLB service enalbed in the Region';
+ALTER TABLE `cloud`.`region` ADD COLUMN `gslb_service_enabled` tinyint(1) unsigned NOT NULL DEFAULT 1 COMMENT 'Is GSLB service enabled in the Region';
ALTER TABLE `cloud`.`external_load_balancer_devices` ADD COLUMN
`is_gslb_provider` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load
balancer appliance is acting as gslb service provider in the zone';
@@ -2067,7 +2067,7 @@ update `cloud`.`vpc_gateways` set network_acl_id = 2;
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT',
'VpcManager', 'blacklisted.routes', NULL, 'Routes that are blacklisted, can not
be used for Static Routes creation for the VPC Private Gateway');
-INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'enable.dynamic.scale.vm', 'false', 'Enables/Diables dynamically scaling a vm');
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'enable.dynamic.scale.vm', 'false', 'Enables/Disables dynamically scaling a vm');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT',
'management-server', 'scale.retry', '2', 'Number of times to retry scaling up
the vm');
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql b/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql
index baa7bcf9617..fbbf0a2aef8 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql
@@ -20,7 +20,7 @@
--;
-- Update the description to indicate this only works with KVM + Ceph
--- (not implemented properly atm for KVM+NFS/local, and it accidentaly works with XS + NFS. Not applicable for VMware)
+-- (not implemented properly atm for KVM+NFS/local, and it accidentally works with XS + NFS. Not applicable for VMware)
UPDATE `cloud`.`configuration` SET `description`='Indicates whether to always
backup primary storage snapshot to secondary storage. Keeping snapshots only on
Primary storage is applicable for KVM + Ceph only.' WHERE
`name`='snapshot.backup.to.secondary';
-- KVM: enable storage data motion on KVM hypervisor_capabilities
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41720to41800.sql b/engine/schema/src/main/resources/META-INF/db/schema-41720to41800.sql
index 04acdca4191..c51d5a43045 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-41720to41800.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41720to41800.sql
@@ -402,7 +402,7 @@ GROUP BY
ALTER TABLE `cloud`.`load_balancing_rules`
ADD cidr_list VARCHAR(4096);
--- savely add resources in parallel
+-- safely add resources in parallel
-- PR#5984 Create table to persist VM stats.
DROP TABLE IF EXISTS `cloud`.`resource_reservation`;
CREATE TABLE `cloud`.`resource_reservation` (
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-420to421.sql b/engine/schema/src/main/resources/META-INF/db/schema-420to421.sql
index bf1acc17bc2..b99af287bc5 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-420to421.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-420to421.sql
@@ -238,7 +238,7 @@ update `cloud`.`volumes` v, `cloud`.`volume_host_ref` vhr
set v.format=vhr.fo
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT',
'management-server', 'baremetal.ipmi.lan.interface', 'default', 'option
specified in -I option of impitool. candidates are:
open/bmc/lipmi/lan/lanplus/free/imb, see ipmitool man page for details. default
value "default" means using default option of ipmitool');
-INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'baremetal.ipmi.fail.retry', 'default', "ipmi interface will be temporary out of order after power opertions(e.g. cycle, on), it leads following commands fail immediately. The value specifies retry times before accounting it as real failure");
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'baremetal.ipmi.fail.retry', 'default', "ipmi interface will be temporary out of order after power operations(e.g. cycle, on), it leads following commands fail immediately. The value specifies retry times before accounting it as real failure");
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT',
'management-server', 'vmware.hung.wokervm.timeout', '7200', 'Worker VM timeout
in seconds');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Alert", 'DEFAULT',
'management-server', "alert.smtp.connectiontimeout", "30000", "Socket
connection timeout value in milliseconds. -1 for infinite timeout.");
INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Alert", 'DEFAULT',
'management-server', "alert.smtp.timeout", "30000", "Socket I/O timeout value
in milliseconds. -1 for infinite timeout.");
diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java
index 59f5b7c8682..f1f073db170 100644
--- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java
@@ -183,7 +183,7 @@ public class DefaultSnapshotStrategy extends
SnapshotStrategyBase {
fullBackup = false;
}
} else if (oldestSnapshotOnPrimary.getId() !=
parentSnapshotOnPrimaryStore.getId()){
- // if there is an snapshot entry for previousPool(primary storage) of migrated volume, delete it becasue CS created one more snapshot entry for current pool
+ // if there is an snapshot entry for previousPool(primary storage) of migrated volume, delete it because CS created one more snapshot entry for current pool
snapshotStoreDao.remove(oldestSnapshotOnPrimary.getId());
}
}
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java
index 47ec9890da8..3059018f8ee 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java
@@ -102,7 +102,7 @@ public class ObjectInDataStoreManagerImpl implements
ObjectInDataStoreManager {
stateMachines.addTransition(State.Destroying, Event.OperationFailed,
State.Destroying);
stateMachines.addTransition(State.Failed, Event.DestroyRequested,
State.Destroying);
// TODO: further investigate why an extra event is sent when it is
- // alreay Ready for DownloadListener
+ // already Ready for DownloadListener
stateMachines.addTransition(State.Ready, Event.OperationSuccessed,
State.Ready);
// State transitions for data object migration
stateMachines.addTransition(State.Ready, Event.MigrateDataRequested,
State.Migrating);
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
index 4c13759c1c1..bc16bafd7a9 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
@@ -428,7 +428,7 @@ public class DefaultEndPointSelector implements
EndPointSelector {
}
// If ssvm doesn't exist then find any ssvm in the zone.
- s_logger.debug("Coudn't find ssvm for url" +downloadUrl);
+ s_logger.debug("Couldn't find ssvm for url" +downloadUrl);
return findEndpointForImageStorage(store);
}
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java
index f9684d648c2..cb14506ad17 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java
@@ -314,7 +314,7 @@ public class TemplateDataStoreDaoImpl extends
GenericDaoBase<TemplateDataStoreVO
@Override
public List<TemplateDataStoreVO> listByTemplateZoneDownloadStatus(long
templateId, Long zoneId, Status... status) {
- // get all elgible image stores
+ // get all eligible image stores
List<DataStore> imgStores = _storeMgr.getImageStoresByScope(new
ZoneScope(zoneId));
if (imgStores != null) {
List<TemplateDataStoreVO> result = new
ArrayList<TemplateDataStoreVO>();
@@ -341,7 +341,7 @@ public class TemplateDataStoreDaoImpl extends
GenericDaoBase<TemplateDataStoreVO
@Override
public TemplateDataStoreVO findByTemplateZoneDownloadStatus(long
templateId, Long zoneId, Status... status) {
- // get all elgible image stores
+ // get all eligible image stores
List<DataStore> imgStores = _storeMgr.getImageStoresByScope(new
ZoneScope(zoneId));
if (imgStores != null) {
for (DataStore store : imgStores) {
@@ -357,7 +357,7 @@ public class TemplateDataStoreDaoImpl extends
GenericDaoBase<TemplateDataStoreVO
@Override
public TemplateDataStoreVO findByTemplateZoneStagingDownloadStatus(long
templateId, Long zoneId, Status... status) {
- // get all elgible image stores
+ // get all eligible image stores
List<DataStore> cacheStores = _storeMgr.getImageCacheStores(new
ZoneScope(zoneId));
if (cacheStores != null) {
for (DataStore store : cacheStores) {
@@ -448,7 +448,7 @@ public class TemplateDataStoreDaoImpl extends
GenericDaoBase<TemplateDataStoreVO
@Override
public TemplateDataStoreVO findByTemplateZone(long templateId, Long
zoneId, DataStoreRole role) {
- // get all elgible image stores
+ // get all eligible image stores
List<DataStore> imgStores = null;
if (role == DataStoreRole.Image) {
imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId));
diff --git a/engine/storage/src/test/java/org/apache/cloudstack/storage/BaseTypeTest.java b/engine/storage/src/test/java/org/apache/cloudstack/storage/BaseTypeTest.java
index 48759da2902..379ebfa0942 100644
--- a/engine/storage/src/test/java/org/apache/cloudstack/storage/BaseTypeTest.java
+++ b/engine/storage/src/test/java/org/apache/cloudstack/storage/BaseTypeTest.java
@@ -32,8 +32,8 @@ public class BaseTypeTest {
@Test
public void testIsSameTypeAs() {
- Assert.assertTrue("'a' and 'A' should be considdered the same type",
new TestType("a").isSameTypeAs("A"));
- Assert.assertTrue("'B' and 'b' should be considdered the same
address", new TestType("B").isSameTypeAs(new TestType("b")));
+ Assert.assertTrue("'a' and 'A' should be considered the same type",
new TestType("a").isSameTypeAs("A"));
+ Assert.assertTrue("'B' and 'b' should be considered the same address",
new TestType("B").isSameTypeAs(new TestType("b")));
}
class TestType extends BaseType {
String content;
diff --git a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/CloudStackTypes.cs b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/CloudStackTypes.cs
index 6e1df24f27a..306bc96f8c9 100644
--- a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/CloudStackTypes.cs
+++ b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/CloudStackTypes.cs
@@ -24,7 +24,7 @@ using System.Text;
using System.Threading.Tasks;
// C# versions of certain CloudStack types to simplify JSON serialisation.
-// Limit to the number of types, becasue they are written and maintained manually.
+// Limit to the number of types, because they are written and maintained manually.
// JsonProperty used to identify property name when serialised, which allows
// later adoption of C# naming conventions if requried.
namespace HypervResource
diff --git a/plugins/hypervisors/hyperv/DotNet/ServerResource/ServerResource.Tests/HypervResourceController1Test.cs b/plugins/hypervisors/hyperv/DotNet/ServerResource/ServerResource.Tests/HypervResourceController1Test.cs
index 87a12c977d2..91d41e63b9b 100644
--- a/plugins/hypervisors/hyperv/DotNet/ServerResource/ServerResource.Tests/HypervResourceController1Test.cs
+++ b/plugins/hypervisors/hyperv/DotNet/ServerResource/ServerResource.Tests/HypervResourceController1Test.cs
@@ -217,7 +217,7 @@ namespace ServerResource.Tests
{
testSampleVolumeTempURIJSON = "\"storagepool\"";
// Arrange
- String destoryCmd = //"{\"volume\":" + getSampleVolumeObjectTO() + "}";
+ String destroyCmd = //"{\"volume\":" + getSampleVolumeObjectTO() + "}";
"{\"volume\":{\"name\":\"" +
testSampleVolumeTempUUIDNoExt
+ "\",\"storagePoolType\":\"Filesystem\","
+ "\"mountPoint\":"
@@ -233,15 +233,15 @@ namespace ServerResource.Tests
HypervResourceController rsrcServer = new
HypervResourceController();
HypervResourceController.wmiCallsV2 = wmiCallsV2;
- dynamic jsonDestoryCmd = JsonConvert.DeserializeObject(destoryCmd);
+ dynamic jsonDestroyCmd = JsonConvert.DeserializeObject(destroyCmd);
// Act
- dynamic destoryAns = rsrcServer.DestroyCommand(jsonDestoryCmd);
+ dynamic destroyAns = rsrcServer.DestroyCommand(jsonDestroyCmd);
// Assert
- JObject ansAsProperty2 = destoryAns[0];
+ JObject ansAsProperty2 = destroyAns[0];
dynamic ans = ansAsProperty2.GetValue(CloudStackTypes.Answer);
- String path = jsonDestoryCmd.volume.path;
+ String path = jsonDestroyCmd.volume.path;
Assert.True((bool)ans.result, "DestroyCommand did not succeed " +
ans.details);
Assert.True(!File.Exists(path), "Failed to delete file " + path);
}
diff --git a/plugins/hypervisors/hyperv/DotNet/ServerResource/ServerResource.Tests/HypervResourceControllerTest.cs b/plugins/hypervisors/hyperv/DotNet/ServerResource/ServerResource.Tests/HypervResourceControllerTest.cs
index fab1b8243b2..7f03e151253 100644
--- a/plugins/hypervisors/hyperv/DotNet/ServerResource/ServerResource.Tests/HypervResourceControllerTest.cs
+++ b/plugins/hypervisors/hyperv/DotNet/ServerResource/ServerResource.Tests/HypervResourceControllerTest.cs
@@ -232,7 +232,7 @@ namespace ServerResource.Tests
{
// Arrange
String sampleVolume = getSampleVolumeObjectTO();
- String destoryCmd = //"{\"volume\":" + getSampleVolumeObjectTO() + "}";
+ String destroyCmd = //"{\"volume\":" + getSampleVolumeObjectTO() + "}";
"{\"volume\":{\"name\":\"" +
testSampleVolumeTempUUIDNoExt
+ "\",\"storagePoolType\":\"Filesystem\","
+ "\"mountPoint\":"
@@ -243,15 +243,15 @@ namespace ServerResource.Tests
+
"\"type\":\"ROOT\",\"id\":9,\"size\":0}}";
HypervResourceController rsrcServer = new
HypervResourceController();
- dynamic jsonDestoryCmd = JsonConvert.DeserializeObject(destoryCmd);
+ dynamic jsonDestroyCmd = JsonConvert.DeserializeObject(destroyCmd);
// Act
- dynamic destoryAns = rsrcServer.DestroyCommand(jsonDestoryCmd);
+ dynamic destroyAns = rsrcServer.DestroyCommand(jsonDestroyCmd);
// Assert
- JObject ansAsProperty2 = destoryAns[0];
+ JObject ansAsProperty2 = destroyAns[0];
dynamic ans = ansAsProperty2.GetValue(CloudStackTypes.Answer);
- String path = jsonDestoryCmd.volume.path;
+ String path = jsonDestroyCmd.volume.path;
Assert.True((bool)ans.result, "DestroyCommand did not succeed " +
ans.details);
Assert.True(!File.Exists(path), "Failed to delete file " + path);
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMGuestOsMapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMGuestOsMapper.java
index 11d1e166f05..1cf74501770 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMGuestOsMapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMGuestOsMapper.java
@@ -107,7 +107,7 @@ public class KVMGuestOsMapper {
s_mapper.put("Windows Server 2003 Standard Edition(32-bit)", "Windows
Server 2003");
s_mapper.put("Windows Server 2003 Standard Edition(64-bit)", "Windows
Server 2003");
s_mapper.put("Windows Server 2003 Web Edition", "Windows Server 2003");
- s_mapper.put("Microsoft Small Bussiness Server 2003", "Windows Server
2003");
+ s_mapper.put("Microsoft Small Business Server 2003", "Windows Server
2003");
s_mapper.put("Windows Server 2008 (32-bit)", "Windows Server 2008");
s_mapper.put("Windows Server 2008 (64-bit)", "Windows Server 2008");
s_mapper.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008");
diff --git a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmVolumeModule.py b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
index 0723276e9a2..d2152b1c806 100755
--- a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
+++ b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
@@ -100,7 +100,7 @@ class OvmVolume(OvmObject):
priStorageMountPoint = sr.mountpoint
volDir = join(priStorageMountPoint, 'running_pool', volDirUuid)
if exists(volDir):
- raise Exception("Volume dir %s alreay existed, can not
override"%volDir)
+ raise Exception("Volume dir %s already existed, can not
override"%volDir)
os.makedirs(volDir)
OvmStoragePool()._checkDirSizeForImage(volDir, templateUrl)
volName = volUuid + '.raw'
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
index 8db5a1159f8..90473705a53 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
@@ -1581,7 +1581,7 @@ public abstract class CitrixResourceBase extends
ServerResourceBase implements S
try {
final Set<VDI> vdis = VDI.getByNameLabel(conn, nameLabel);
if (vdis.size() != 1) {
- s_logger.warn("destoryVDIbyNameLabel failed due to there are "
+ vdis.size() + " VDIs with name " + nameLabel);
+ s_logger.warn("destroyVDIbyNameLabel failed due to there are "
+ vdis.size() + " VDIs with name " + nameLabel);
return;
}
for (final VDI vdi : vdis) {
@@ -3199,7 +3199,7 @@ public abstract class CitrixResourceBase extends
ServerResourceBase implements S
// constraint
// for
// stability
- if (dynamicMaxRam > staticMax) { // XS contraint that dynamic max <=
+ if (dynamicMaxRam > staticMax) { // XS constraint that dynamic max <=
// static max
s_logger.warn("dynamic max " + toHumanReadableSize(dynamicMaxRam)
+ " can't be greater than static max " + toHumanReadableSize(staticMax) + ",
this can lead to stability issues. Setting static max as much as dynamic max ");
return dynamicMaxRam;
@@ -3213,7 +3213,7 @@ public abstract class CitrixResourceBase extends
ServerResourceBase implements S
return dynamicMinRam;
}
- if (dynamicMinRam < recommendedValue) { // XS contraint that dynamic min
+ if (dynamicMinRam < recommendedValue) { // XS constraint that dynamic min
// > static min
s_logger.warn("Vm ram is set to dynamic min " +
toHumanReadableSize(dynamicMinRam) + " and is less than the recommended static
min " + toHumanReadableSize(recommendedValue) + ", this could lead to stability
issues");
}
@@ -4589,7 +4589,7 @@ public abstract class CitrixResourceBase extends
ServerResourceBase implements S
removeSR(conn, sr);
return;
} catch (XenAPIException | XmlRpcException e) {
- s_logger.warn(logX(sr, "Unable to get current opertions " + e.toString()), e);
+ s_logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e);
}
String msg = "Remove SR failed";
s_logger.warn(msg);
@@ -4684,9 +4684,9 @@ public abstract class CitrixResourceBase extends
ServerResourceBase implements S
removeSR(conn, sr);
return null;
} catch (final XenAPIException e) {
- s_logger.warn(logX(sr, "Unable to get current opertions " + e.toString()), e);
+ s_logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e);
} catch (final XmlRpcException e) {
- s_logger.warn(logX(sr, "Unable to get current opertions " + e.getMessage()), e);
+ s_logger.warn(logX(sr, "Unable to get current operations " + e.getMessage()), e);
}
final String msg = "Remove SR failed";
s_logger.warn(msg);
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
index 7c289de80c7..cb226ed7d9b 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
@@ -633,7 +633,7 @@ public class XenServerStorageProcessor implements
StorageProcessor {
try {
final Set<VDI> vdis = VDI.getByNameLabel(conn, nameLabel);
if (vdis.size() != 1) {
- s_logger.warn("destoryVDIbyNameLabel failed due to there are "
+ vdis.size() + " VDIs with name " + nameLabel);
+ s_logger.warn("destroyVDIbyNameLabel failed due to there are "
+ vdis.size() + " VDIs with name " + nameLabel);
return;
}
for (final VDI vdi : vdis) {
diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java
index 2a272655ada..2f2ad259d6c 100644
--- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java
+++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java
@@ -244,7 +244,7 @@ public class ElastistorUtil {
if (listAccountResponse.getAccounts().getCount() != 0) {
int i;
- // check weather a account in elasticenter with given Domain name is
+ // check whether an account in elasticenter with given Domain name is
// already present in the list of accounts
for (i = 0; i < listAccountResponse.getAccounts().getCount(); i++)
{
if
(domainName.equals(listAccountResponse.getAccounts().getAccount(i).getName())) {
diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java
index baadd132f20..a1084bf9a40 100644
--- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java
+++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java
@@ -943,7 +943,7 @@ public class DateraUtil {
}
/**
- * Checks wether a host initiator is present in an initiator group
+ * Checks whether a host initiator is present in an initiator group
*
* @param initiator Host initiator to check
* @param initiatorGroup the initiator group
diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
index 4453906d2aa..19a6fe13281 100644
--- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
+++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java
@@ -255,7 +255,7 @@ public class CloudStackPrimaryDataStoreDriverImpl
implements PrimaryDataStoreDri
}
}
} catch (Exception ex) {
- s_logger.debug("Unable to destoy volume" + data.getId(), ex);
+ s_logger.debug("Unable to destroy volume" + data.getId(), ex);
result.setResult(ex.toString());
}
callback.complete(result);
diff --git a/scripts/vm/hypervisor/xenserver/cloud-plugin-storage b/scripts/vm/hypervisor/xenserver/cloud-plugin-storage
index d28c19543bd..01a4cdd21b2 100644
--- a/scripts/vm/hypervisor/xenserver/cloud-plugin-storage
+++ b/scripts/vm/hypervisor/xenserver/cloud-plugin-storage
@@ -105,7 +105,7 @@ def manageAvailability(path, value):
return
-def checkVolumeAvailablility(path):
+def checkVolumeAvailability(path):
try:
if not isVolumeAvailable(path):
# The VHD file is not available on XenSever. The volume is probably
@@ -172,7 +172,7 @@ def getParentOfSnapshot(snapshotUuid, primarySRPath,
isISCSI):
baseCopyUuid = ''
if isISCSI:
- checkVolumeAvailablility(snapshotPath)
+ checkVolumeAvailability(snapshotPath)
baseCopyUuid = scanParent(snapshotPath)
else:
baseCopyUuid = getParent(snapshotPath, isISCSI)
diff --git a/scripts/vm/hypervisor/xenserver/vmops b/scripts/vm/hypervisor/xenserver/vmops
index 0d82a9d2116..4f78a3c90f8 100755
--- a/scripts/vm/hypervisor/xenserver/vmops
+++ b/scripts/vm/hypervisor/xenserver/vmops
@@ -769,7 +769,7 @@ def add_to_ipset(ipsetname, ips, action):
logging.debug("vm ip " + ip)
util.pread2(['ipset', action, ipsetname, ip])
except:
- logging.debug("vm ip alreday in ip set" + ip)
+ logging.debug("vm ip already in ip set" + ip)
continue
return result
@@ -1023,7 +1023,7 @@ def network_rules_for_rebooted_vm(session, vmName):
[vm_ip, vm_mac] = get_vm_mac_ip_from_log(vmchain)
default_arp_antispoof(vmchain, vifs, vm_ip, vm_mac)
- #check wether the vm has secondary ips
+ #check whether the vm has secondary ips
if is_secondary_ips_set(vm_name) == True:
vmips = get_vm_sec_ips(vm_name)
#add arp rules for the secondaryp ip
diff --git a/scripts/vm/hypervisor/xenserver/vmopsSnapshot b/scripts/vm/hypervisor/xenserver/vmopsSnapshot
index d4cd4b98210..0d5fcc184c5 100755
--- a/scripts/vm/hypervisor/xenserver/vmopsSnapshot
+++ b/scripts/vm/hypervisor/xenserver/vmopsSnapshot
@@ -190,7 +190,7 @@ def isfile(path, isISCSI):
errMsg = ''
exists = True
if isISCSI:
- exists = checkVolumeAvailablility(path)
+ exists = checkVolumeAvailability(path)
else:
exists = os.path.isfile(path)
@@ -269,7 +269,7 @@ def getParentOfSnapshot(snapshotUuid, primarySRPath,
isISCSI):
baseCopyUuid = ''
if isISCSI:
- checkVolumeAvailablility(snapshotPath)
+ checkVolumeAvailability(snapshotPath)
baseCopyUuid = scanParent(snapshotPath)
else:
baseCopyUuid = getParent(snapshotPath, isISCSI)
@@ -439,7 +439,7 @@ def manageAvailability(path, value):
return
-def checkVolumeAvailablility(path):
+def checkVolumeAvailability(path):
try:
if not isVolumeAvailable(path):
# The VHD file is not available on XenSever. The volume is probably
diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java
index f1b58360618..2d677042b62 100644
--- a/server/src/main/java/com/cloud/configuration/Config.java
+++ b/server/src/main/java/com/cloud/configuration/Config.java
@@ -1729,7 +1729,7 @@ public enum Config {
String.class,
"baremetal.ipmi.fail.retry",
"5",
- "ipmi interface will be temporary out of order after power
opertions(e.g. cycle, on), it leads following commands fail immediately. The
value specifies retry times before accounting it as real failure",
+ "ipmi interface will be temporary out of order after power
operations(e.g. cycle, on), it leads following commands fail immediately. The
value specifies retry times before accounting it as real failure",
null),
ApiLimitEnabled("Advanced", ManagementServer.class, Boolean.class,
"api.throttling.enabled", "false", "Enable/disable Api rate limit", null),
diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java
index 4d439ceda43..c29d3f55813 100644
--- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java
+++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java
@@ -5118,7 +5118,7 @@ public class NetworkServiceImpl extends ManagerBase
implements NetworkService, C
List<SecondaryStorageVmVO> ssvms =
_stnwMgr.getSSVMWithNoStorageNetwork(network.getDataCenterId());
if (!ssvms.isEmpty()) {
StringBuilder sb = new StringBuilder("Cannot add " +
trafficType
- + " traffic type as there are below secondary storage
vm still running. Please stop them all and add Storage traffic type again, then
destory them all to allow CloudStack recreate them with storage network(If you
have added storage network ip range)");
+ + " traffic type as there are below secondary storage
vm still running. Please stop them all and add Storage traffic type again, then
destroy them all to allow CloudStack recreate them with storage network(If you
have added storage network ip range)");
sb.append("SSVMs:");
for (SecondaryStorageVmVO ssvm : ssvms) {
sb.append(ssvm.getInstanceName()).append(":").append(ssvm.getState());
diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
index a0e8a2a2f0d..d49322bde60 100644
--- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
+++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
@@ -2533,7 +2533,7 @@ Configurable, StateListener<VirtualMachine.State,
VirtualMachine.Event, VirtualM
boolean revoke = false;
if (ip.getState() == IpAddress.State.Releasing ) {
// for ips got struck in releasing state we need
to delete the rule not add.
- s_logger.debug("Rule revoke set to true for the ip
" + ip.getAddress() +" becasue it is in releasing state");
+ s_logger.debug("Rule revoke set to true for the ip
" + ip.getAddress() +" because it is in releasing state");
revoke = true;
}
final StaticNatImpl staticNat = new
StaticNatImpl(ip.getAccountId(), ip.getDomainId(), guestNetworkId, ip.getId(),
ip.getVmIp(), revoke);
diff --git a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java
index 115364c527a..624fbfb9d24 100644
--- a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java
+++ b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java
@@ -670,7 +670,7 @@ public class RulesManagerImpl extends ManagerBase
implements RulesManager, Rules
" as it's already assigned to antoher vm");
}
- //check wether the vm ip is alreday associated with any public ip address
+ //check whether the vm ip is already associated with any public ip address
IPAddressVO oldIP = _ipAddressDao.findByAssociatedVmIdAndVmIp(vmId,
vmIp);
if (oldIP != null) {
diff --git a/server/src/test/java/com/cloud/network/vpc/NetworkACLServiceImplTest.java b/server/src/test/java/com/cloud/network/vpc/NetworkACLServiceImplTest.java
index be48bf92933..8dd3f32d6c4 100644
--- a/server/src/test/java/com/cloud/network/vpc/NetworkACLServiceImplTest.java
+++ b/server/src/test/java/com/cloud/network/vpc/NetworkACLServiceImplTest.java
@@ -1170,7 +1170,7 @@ public class NetworkACLServiceImplTest {
}
@Test(expected = InvalidParameterValueException.class)
- public void moveRuleBetweenAclRulesTestThereIsSpaceBetweenPreviousRuleAndNextRuleToAccomodateTheNewRuleWithOtherruleColliding() {
+ public void moveRuleBetweenAclRulesTestThereIsSpaceBetweenPreviousRuleAndNextRuleToAccommodateTheNewRuleWithOtherRuleColliding() {
Mockito.when(previousAclRuleMock.getNumber()).thenReturn(10);
Mockito.when(nextAclRuleMock.getNumber()).thenReturn(15);
@@ -1186,7 +1186,7 @@ public class NetworkACLServiceImplTest {
}
@Test
- public void moveRuleBetweenAclRulesTestThereIsSpaceBetweenPreviousRuleAndNextRuleToAccomodateTheNewRule() {
+ public void moveRuleBetweenAclRulesTestThereIsSpaceBetweenPreviousRuleAndNextRuleToAccommodateTheNewRule() {
Mockito.when(previousAclRuleMock.getNumber()).thenReturn(10);
Mockito.when(nextAclRuleMock.getNumber()).thenReturn(11);
Mockito.when(aclRuleBeingMovedMock.getId()).thenReturn(1l);
@@ -1218,7 +1218,7 @@ public class NetworkACLServiceImplTest {
}
@Test
-    public void moveRuleBetweenAclRulesTestThereIsNoSpaceBetweenPreviousRuleAndNextRuleToAccomodateTheNewRule() {
+    public void moveRuleBetweenAclRulesTestThereIsNoSpaceBetweenPreviousRuleAndNextRuleToAccommodateTheNewRule() {
Mockito.when(previousAclRuleMock.getNumber()).thenReturn(10);
Mockito.when(nextAclRuleMock.getNumber()).thenReturn(15);
Mockito.when(aclRuleBeingMovedMock.getNumber()).thenReturn(50);
diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql
index f20a52c6952..3f14fccd010 100755
--- a/setup/db/create-schema.sql
+++ b/setup/db/create-schema.sql
@@ -1927,7 +1927,7 @@ CREATE TABLE `cloud`.`keystore` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(64) NOT NULL COMMENT 'unique name for the certifiation',
  `certificate` text NOT NULL COMMENT 'the actual certificate being stored in the db',
- `key` text COMMENT 'private key associated wih the certificate',
+ `key` text COMMENT 'private key associated with the certificate',
  `domain_suffix` varchar(256) NOT NULL COMMENT 'DNS domain suffix associated with the certificate',
`seq` int,
PRIMARY KEY (`id`),
diff --git a/setup/db/templates.sql b/setup/db/templates.sql
index aa0bd1d4e9d..3f154acc797 100755
--- a/setup/db/templates.sql
+++ b/setup/db/templates.sql
@@ -147,7 +147,7 @@ INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (88,
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (89, UUID(), 6, 'Windows Server 2003 Standard Edition(32-bit)');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (90, UUID(), 6, 'Windows Server 2003 Standard Edition(64-bit)');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (91, UUID(), 6, 'Windows Server 2003 Web Edition');
-INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (92, UUID(), 6, 'Microsoft Small Bussiness Server 2003');
+INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (92, UUID(), 6, 'Microsoft Small Business Server 2003');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (93, UUID(), 6, 'Windows XP (32-bit)');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (94, UUID(), 6, 'Windows XP (64-bit)');
 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (95, UUID(), 6, 'Windows 2000 Advanced Server');
@@ -395,7 +395,7 @@ INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Standard Edition (32-bit)', 89);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Standard Edition (64-bit)', 90);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Server 2003, Web Edition', 91);
-INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Small Bussiness Server 2003', 92);
+INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Small Business Server 2003', 92);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Vista (32-bit)', 56);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows Vista (64-bit)', 101);
 INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows XP Professional (32-bit)', 93);
diff --git a/test/integration/component/test_browse_volumes.py b/test/integration/component/test_browse_volumes.py
index a1ee938bb28..1cf3bacfc93 100644
--- a/test/integration/component/test_browse_volumes.py
+++ b/test/integration/component/test_browse_volumes.py
@@ -2083,9 +2083,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
vm4details = self.deploy_vm()
- newvolumetodestoy_VM = self.browse_upload_volume()
+ newvolumetodestroy_VM = self.browse_upload_volume()
- self.attach_volume(vm4details, newvolumetodestoy_VM.id)
+ self.attach_volume(vm4details, newvolumetodestroy_VM.id)
self.destroy_vm(vm4details)
@@ -2095,7 +2095,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.expunge_vm(vm4details)
cmd = deleteVolume.deleteVolumeCmd()
- cmd.id = newvolumetodestoy_VM.id
+ cmd.id = newvolumetodestroy_VM.id
self.apiclient.deleteVolume(cmd)
self.debug(
diff --git a/test/integration/component/test_vpc_vm_life_cycle.py b/test/integration/component/test_vpc_vm_life_cycle.py
index 3d41dcadb48..abe6d194a54 100644
--- a/test/integration/component/test_vpc_vm_life_cycle.py
+++ b/test/integration/component/test_vpc_vm_life_cycle.py
@@ -627,7 +627,7 @@ class TestVMLifeCycleVPC(cloudstackTestCase):
"""
# Validate the following
- # 1. Destory the virtual machines.
+ # 1. Destroy the virtual machines.
# 2. Rules should be still configured on virtual router.
# 3. Recover the virtual machines.
# 4. Vm should be in stopped state. State both the instances
@@ -1751,7 +1751,7 @@ class TestVMLifeCycleStoppedVPCVR(cloudstackTestCase):
"""
# Validate the following
- # 1. Destory the virtual machines.
+ # 1. Destroy the virtual machines.
# 2. Rules should be still configured on virtual router.
# 3. Recover the virtual machines.
# 4. Vm should be in stopped state. State both the instances
@@ -2466,7 +2466,7 @@ class TestVMLifeCycleDiffHosts(cloudstackTestCase):
"""
# Validate the following
- # 1. Destory the virtual machines.
+ # 1. Destroy the virtual machines.
# 2. Rules should be still configured on virtual router.
# 3. Recover the virtual machines.
# 4. Vm should be in stopped state. State both the instances
diff --git a/usage/src/test/resources/cloud1.xml b/usage/src/test/resources/cloud1.xml
index 773283ac5c5..cf79f953884 100644
--- a/usage/src/test/resources/cloud1.xml
+++ b/usage/src/test/resources/cloud1.xml
@@ -11,7 +11,7 @@
<dataset>
<configuration name="usage.stats.job.aggregation.range" value="600"
instance="test"/>
- <vm_instance type="User" id="8" account_id="1" domain_id="1" name="test"
instance_name="test" state="destoyed" guest_os_id="1" service_offering_id="1"
data_center_id="1" vnc_password="xyz" vm_type="User" created="2019-01-01
00:00:01" removed="2018-01-01 00:00:01" />
+ <vm_instance type="User" id="8" account_id="1" domain_id="1" name="test"
instance_name="test" state="destroyed" guest_os_id="1" service_offering_id="1"
data_center_id="1" vnc_password="xyz" vm_type="User" created="2019-01-01
00:00:01" removed="2018-01-01 00:00:01" />
<volumes id="16" account_id="1" domain_id="1" size="1" data_center_id="1"
volume_type="root" disk_offering_id="1" removed="2018-01-01 00:00:01"/>
<volumes id="17" account_id="1" domain_id="1" size="1" data_center_id="1"
volume_type="root" disk_offering_id="1" removed="2019-01-01 00:00:01"/>
diff --git a/usage/src/test/resources/cloud2.xml b/usage/src/test/resources/cloud2.xml
index 099dde5737c..e8190b13984 100644
--- a/usage/src/test/resources/cloud2.xml
+++ b/usage/src/test/resources/cloud2.xml
@@ -11,7 +11,7 @@
<dataset>
<configuration name="usage.stats.job.aggregation.range" value="600"
instance="test" />
- <vm_instance type="User" id="8" account_id="1" domain_id="1" name="test"
instance_name="test" state="destoyed" guest_os_id="1" service_offering_id="1"
data_center_id="1" vnc_password="xyz" vm_type="User" created="2019-01-01
00:00:01" removed="2018-01-01 00:00:01" />
+ <vm_instance type="User" id="8" account_id="1" domain_id="1" name="test"
instance_name="test" state="destroyed" guest_os_id="1" service_offering_id="1"
data_center_id="1" vnc_password="xyz" vm_type="User" created="2019-01-01
00:00:01" removed="2018-01-01 00:00:01" />
<volumes id="16" account_id="1" domain_id="1" size="1" data_center_id="1"
volume_type="root" disk_offering_id="1" removed="2018-01-01 00:00:01"/>
<volumes id="17" account_id="1" domain_id="1" size="1" data_center_id="1"
volume_type="root" disk_offering_id="1" removed="2018-01-01 00:00:01"/>
diff --git a/utils/src/test/java/com/cloud/utils/net/Ip4AddressTest.java b/utils/src/test/java/com/cloud/utils/net/Ip4AddressTest.java
index 79ff238785a..63a5216fed8 100644
--- a/utils/src/test/java/com/cloud/utils/net/Ip4AddressTest.java
+++ b/utils/src/test/java/com/cloud/utils/net/Ip4AddressTest.java
@@ -33,8 +33,8 @@ public class Ip4AddressTest {
@Test
public void testIsSameAddressAs() {
-        Assert.assertTrue("1 and one should be considdered the same address", new Ip4Address(1L, 5L).isSameAddressAs("0.0.0.1"));
-        Assert.assertFalse("zero and 0L should be considdered the same address but a Long won't be accepted", new Ip4Address("0.0.0.0", "00:00:00:00:00:08").isSameAddressAs(0L));
+        Assert.assertTrue("1 and one should be considered the same address", new Ip4Address(1L, 5L).isSameAddressAs("0.0.0.1"));
+        Assert.assertFalse("zero and 0L should be considered the same address but a Long won't be accepted", new Ip4Address("0.0.0.0", "00:00:00:00:00:08").isSameAddressAs(0L));
}
}
diff --git a/utils/src/test/java/com/cloud/utils/net/IpTest.java b/utils/src/test/java/com/cloud/utils/net/IpTest.java
index 89608f1967f..3ff729351bc 100644
--- a/utils/src/test/java/com/cloud/utils/net/IpTest.java
+++ b/utils/src/test/java/com/cloud/utils/net/IpTest.java
@@ -56,8 +56,8 @@ public class IpTest {
@Test
public void testIsSameAddressAs() {
-        Assert.assertTrue("1 and one should be considdered the same address", new Ip(1L).isSameAddressAs("0.0.0.1"));
-        Assert.assertTrue("zero and 0L should be considdered the same address", new Ip("0.0.0.0").isSameAddressAs(0L));
+        Assert.assertTrue("1 and one should be considered the same address", new Ip(1L).isSameAddressAs("0.0.0.1"));
+        Assert.assertTrue("zero and 0L should be considered the same address", new Ip("0.0.0.0").isSameAddressAs(0L));
}
}