rhtyd commented on a change in pull request #4250:
URL: https://github.com/apache/cloudstack/pull/4250#discussion_r471344055



##########
File path: api/src/main/java/com/cloud/storage/ImageStore.java
##########
@@ -21,6 +21,13 @@
 
 public interface ImageStore extends Identity, InternalIdentity {
 
+    String ACS_PROPERTY_PREFIX = "ACS-property-";
+    String REQUIRED_NETWORK_PREFIX = "ACS-network-";
+    String DISK_DEFINITION_PREFIX = "ACS-disk-";
+    String OVF_HARDWARE_CONFIGURATION_PREFIX = "ACS-configuration-";
+    String OVF_HARDWARE_ITEM_PREFIX = "ACS-hardware-item-";
+    String OVF_EULA_SECTION_PREFIX = "ACS-eula-";
+

Review comment:
       Minor nit - @nvazquez could this use a lower-cased, dot-joined prefix like many other CloudStack constants, such as `acs.property.`?
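       For example, something along these lines (just a sketch, the exact keys are only a suggestion):

```java
// sketch only: lower-cased, dot-joined prefixes in line with other CloudStack keys
String ACS_PROPERTY_PREFIX = "acs.property.";
String REQUIRED_NETWORK_PREFIX = "acs.network.";
String DISK_DEFINITION_PREFIX = "acs.disk.";
String OVF_HARDWARE_CONFIGURATION_PREFIX = "acs.configuration.";
String OVF_HARDWARE_ITEM_PREFIX = "acs.hardware.item.";
String OVF_EULA_SECTION_PREFIX = "acs.eula.";
```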

##########
File path: api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java
##########
@@ -138,103 +155,88 @@ protected OVFPropertyTO createOVFPropertyFromNode(Node node) {
     }
 
     /**
-     * Get properties from OVF file located on ovfFilePath
+     * Get properties from OVF XML string
      */
-    public List<OVFPropertyTO> getOVFPropertiesFromFile(String ovfFilePath) throws ParserConfigurationException, IOException, SAXException {
-        if (StringUtils.isBlank(ovfFilePath)) {
-            return new ArrayList<>();
-        }
-        File ovfFile = new File(ovfFilePath);
-        final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(ovfFile);
+    protected List<OVFPropertyTO> getOVFPropertiesFromXmlString(final String ovfString) throws ParserConfigurationException, IOException, SAXException {
+        InputSource is = new InputSource(new StringReader(ovfString));
+        final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(is);
         return getConfigurableOVFPropertiesFromDocument(doc);
     }
 
-    /**
-     * Get properties from OVF XML string
-     */
-    protected List<OVFPropertyTO> getOVFPropertiesXmlString(final String ovfFilePath) throws ParserConfigurationException, IOException, SAXException {
-        InputSource is = new InputSource(new StringReader(ovfFilePath));
+    protected List<OVFConfigurationTO> getOVFDeploymentOptionsFromXmlString(final String ovfString) throws ParserConfigurationException, IOException, SAXException {
+        InputSource is = new InputSource(new StringReader(ovfString));
         final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(is);
-        return getConfigurableOVFPropertiesFromDocument(doc);
+        return getDeploymentOptionsFromDocumentTree(doc);
     }
 
-    public List<DatadiskTO> getOVFVolumeInfo(final String ovfFilePath) {
+    protected List<OVFVirtualHardwareItemTO> getOVFVirtualHardwareSectionFromXmlString(final String ovfString) throws ParserConfigurationException, IOException, SAXException {
+        InputSource is = new InputSource(new StringReader(ovfString));
+        final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(is);
+        return getVirtualHardwareItemsFromDocumentTree(doc);
+    }
+
+    protected List<OVFEulaSectionTO> getOVFEulaSectionFromXmlString(final String ovfString) throws ParserConfigurationException, IOException, SAXException {
+        InputSource is = new InputSource(new StringReader(ovfString));
+        final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(is);
+        return getEulaSectionsFromDocument(doc);
+    }
+
+    public List<DatadiskTO> getOVFVolumeInfoFromFile(final String ovfFilePath) throws InternalErrorException {
         if (StringUtils.isBlank(ovfFilePath)) {
-            return new ArrayList<DatadiskTO>();
+            return new ArrayList<>();
+        }
+        Document doc = getDocumentFromFile(ovfFilePath);
+
+        return getOVFVolumeInfoFromFile(ovfFilePath, doc);
+    }
+
+    public List<DatadiskTO> getOVFVolumeInfoFromFile(String ovfFilePath, Document doc) throws InternalErrorException {
+        if (org.apache.commons.lang.StringUtils.isBlank(ovfFilePath)) {
+            return null;
         }
-        ArrayList<OVFFile> vf = new ArrayList<OVFFile>();
-        ArrayList<OVFDisk> vd = new ArrayList<OVFDisk>();
 
         File ovfFile = new File(ovfFilePath);
-        try {
-            final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath));
-            NodeList disks = doc.getElementsByTagName("Disk");
-            NodeList files = doc.getElementsByTagName("File");
-            NodeList items = doc.getElementsByTagName("Item");
-            boolean toggle = true;
-            for (int j = 0; j < files.getLength(); j++) {
-                Element file = (Element)files.item(j);
-                OVFFile of = new OVFFile();
-                of._href = file.getAttribute("ovf:href");
-                if (of._href.endsWith("vmdk") || of._href.endsWith("iso")) {
-                    of._id = file.getAttribute("ovf:id");
-                    String size = file.getAttribute("ovf:size");
-                    if (StringUtils.isNotBlank(size)) {
-                        of._size = Long.parseLong(size);
-                    } else {
-                        String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
-                        File this_file = new File(dataDiskPath);
-                        of._size = this_file.length();
-                    }
-                    of.isIso = of._href.endsWith("iso");
-                    if (toggle && !of.isIso) {
-                        of._bootable = true;
-                        toggle = !toggle;
-                    }
-                    vf.add(of);
-                }
-            }
-            for (int i = 0; i < disks.getLength(); i++) {
-                Element disk = (Element)disks.item(i);
-                OVFDisk od = new OVFDisk();
-                String virtualSize = disk.getAttribute("ovf:capacity");
-                od._capacity = NumberUtils.toLong(virtualSize, 0L);
-                String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
-                od._diskId = disk.getAttribute("ovf:diskId");
-                od._fileRef = disk.getAttribute("ovf:fileRef");
-                od._populatedSize = NumberUtils.toLong(disk.getAttribute("ovf:populatedSize"));
-
-                if ((od._capacity != 0) && (allocationUnits != null)) {
-
-                    long units = 1;
-                    if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
-                        units = ResourceType.bytesToKiB;
-                    } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
-                        units = ResourceType.bytesToMiB;
-                    } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
-                        units = ResourceType.bytesToGiB;
-                    }
-                    od._capacity = od._capacity * units;
-                }
-                od._controller = getControllerType(items, od._diskId);
-                vd.add(od);
-            }
+        NodeList disks = doc.getElementsByTagName("Disk");
+        NodeList files = doc.getElementsByTagName("File");
+        NodeList items = doc.getElementsByTagName("Item");
+        List<OVFFile> vf = extractFilesFromOvfDocumentTree(ovfFile, files);
 
-        } catch (SAXException | IOException | ParserConfigurationException e) {
-            s_logger.error("Unexpected exception caught while parsing ovf file:" + ovfFilePath, e);
-            throw new CloudRuntimeException(e);
+        List<OVFDisk> vd = extractDisksFromOvfDocumentTree(disks, items);
+
+        List<DatadiskTO> diskTOs = matchDisksToFilesAndGenerateDiskTOs(ovfFile, vf, vd);
+
+        moveFirstIsoToEndOfDiskList(diskTOs);
+
+        return diskTOs;
+    }
+
+    /**
+     * check if first disk is an iso move it to the end. the semantics of this are not complete as more than one ISO may be there and theoretically an OVA may only contain ISOs
+     *
+     */
+    private void moveFirstIsoToEndOfDiskList(List<DatadiskTO> diskTOs) {
+        DatadiskTO fd = diskTOs.get(0);

Review comment:
       Should we check diskTOs for null/empty here to avoid an NPE?
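       Something like this could do (rough sketch, untested):

```java
// sketch: bail out early on a null/empty disk list instead of risking an NPE on get(0)
private void moveFirstIsoToEndOfDiskList(List<DatadiskTO> diskTOs) {
    if (diskTOs == null || diskTOs.isEmpty()) {
        return;
    }
    DatadiskTO fd = diskTOs.get(0);
    // ... rest of the method unchanged
}
```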

##########
File path: 
api/src/main/java/org/apache/cloudstack/api/net/NetworkPrerequisiteTO.java
##########
@@ -0,0 +1,124 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.net;
+
+/**
+ * container for the network prerequisites as found in the appliance template
+ *
+ * for OVA:
+ * {code}
+ * <Network ovf:name="Management0-0">
+ *   <Description>Management Network Interface</Description>
+ * </Network>
+ * {code}
+ * {code}
+ * <Item>
+ *   <rasd:AddressOnParent>7</rasd:AddressOnParent>
+ *   <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ *   <rasd:Connection>Management0-0</rasd:Connection>
+ *   <rasd:Description>E1000 Ethernet adapter on "Management Network"</rasd:Description>
+ *   <rasd:ElementName>Network adapter 1</rasd:ElementName>
+ *   <rasd:InstanceID>6</rasd:InstanceID>
+ *   <rasd:ResourceSubType>E1000</rasd:ResourceSubType>
+ *   <rasd:ResourceType>10</rasd:ResourceType>
+ * </Item>
+ * {code}
+ */
+public class NetworkPrerequisiteTO {
+    String name; // attribute on Network should match <rasd:Connection> on Item (virtual hardware)
+    String networkDescription;
+
+    int addressOnParent; // or String?
+    boolean automaticAllocation;
+    String nicDescription;
+    String elementName;
+    int InstanceID; // or String?
+    String resourceSubType;
+    String resourceType; // or int?

Review comment:
       @nvazquez can you clean up these comments?
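       E.g. something like this once the types are settled (sketch only, the types chosen here are just a guess):

```java
// sketch: stray "or String?"/"or int?" notes dropped, types still to be confirmed
String name;               // attribute on Network, should match <rasd:Connection> on the Item (virtual hardware)
String networkDescription;
int addressOnParent;
boolean automaticAllocation;
String nicDescription;
String elementName;
int instanceID;
String resourceSubType;
String resourceType;
```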

##########
File path: api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java
##########
@@ -404,6 +488,270 @@ OVFDisk getDisk(String fileRef, List<OVFDisk> disks) {
         return null;
     }
 
+    public List<NetworkPrerequisiteTO> getNetPrerequisitesFromDocument(Document doc) throws InternalErrorException {
+        if (doc == null) {
+            if (s_logger.isTraceEnabled()) {
+                s_logger.trace("no document to parse; returning no prerequiste networks");
+            }
+            return Collections.emptyList();
+        }
+
+        Map<String, NetworkPrerequisiteTO> nets = getNetworksFromDocumentTree(doc);
+
+        checkForOnlyOneSystemNode(doc);
+
+        matchNicsToNets(nets, doc);
+
+        return new ArrayList<>(nets.values());
+    }
+
+    private void matchNicsToNets(Map<String, NetworkPrerequisiteTO> nets, Node systemElement) {
+        final DocumentTraversal traversal = (DocumentTraversal) systemElement;
+        final NodeIterator iterator = traversal.createNodeIterator(systemElement, NodeFilter.SHOW_ELEMENT, null, true);
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace(String.format("starting out with %d network-prerequisites, parsing hardware",nets.size()));
+        }
+        int nicCount = 0;
+        for (Node n = iterator.nextNode(); n != null; n = iterator.nextNode()) {
+            final Element e = (Element) n;
+            if ("rasd:Connection".equals(e.getTagName())) {
+                nicCount++;
+                String name = e.getTextContent(); // should be in our nets
+                if(nets.get(name) == null) {
+                    if(s_logger.isInfoEnabled()) {
+                        s_logger.info(String.format("found a nic definition without a network definition byname %s, adding it to the list.", name));
+                    }
+                    nets.put(name, new NetworkPrerequisiteTO());
+                }
+                NetworkPrerequisiteTO thisNet = nets.get(name);
+                if (e.getParentNode() != null) {
+                    fillNicPrerequisites(thisNet,e.getParentNode());
+                }
+            }
+        }
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace(String.format("ending up with %d network-prerequisites, parsed %d nics", nets.size(), nicCount));
+        }
+    }
+
+    /**
+     * get all the stuff from parent node
+     * TODO check for completeness and optionality
+     *
+     * @param nic the object to carry through the system
+     * @param parentNode the xml container node for nic data
+     */
+    private void fillNicPrerequisites(NetworkPrerequisiteTO nic, Node parentNode) {
+//     *   <rasd:AddressOnParent>7</rasd:AddressOnParent>
+        try {

Review comment:
       @nvazquez can you fix the comment indentation, or remove the comments if they are not necessary?

##########
File path: 
api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java
##########
@@ -99,7 +99,7 @@
     public EnumSet<ApiConstants.DomainDetails> getDetails() throws InvalidParameterValueException {
         EnumSet<ApiConstants.DomainDetails> dv;
         if (CollectionUtils.isEmpty(viewDetails)) {
-            dv = EnumSet.of(ApiConstants.DomainDetails.min);
+            dv = EnumSet.of(ApiConstants.DomainDetails.all);

Review comment:
       @nvazquez can we use `min` when we don't want details by default, and otherwise return all details like the old behaviour?

##########
File path: 
api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java
##########
@@ -221,10 +224,16 @@
     @Parameter(name = ApiConstants.COPY_IMAGE_TAGS, type = CommandType.BOOLEAN, since = "4.13", description = "if true the image tags (if any) will be copied to the VM, default value is false")
     private Boolean copyImageTags;
 
-    @Parameter(name = ApiConstants.OVF_PROPERTIES, type = CommandType.MAP, since = "4.13",
-            description = "used to specify the OVF properties.")
+    @Parameter(name = ApiConstants.PROPERTIES, type = CommandType.MAP, since = "4.15",
+            description = "used to specify the vApp properties.")
     @LogLevel(LogLevel.Log4jLevel.Off)
-    private Map vmOvfProperties;
+    private Map vAppProperties;
+
+    @Parameter(name = ApiConstants.NIC_NETWORK_LIST, type = CommandType.MAP, since = "4.15",
+            description = "used to specify the vApp network mapping." +

Review comment:
       Should we add an extra line to explain what we mean by vApp (i.e. the network mapping of a vApp VMware template)?
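       For instance, the description could read something like this (wording is only a suggestion):

```java
// sketch: a slightly more explicit parameter description
@Parameter(name = ApiConstants.NIC_NETWORK_LIST, type = CommandType.MAP, since = "4.15",
        description = "used to specify the vApp network mapping, i.e. which CloudStack network each " +
                "network interface defined in the vApp (VMware OVA/OVF) template should be connected to")
```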

##########
File path: core/src/main/java/com/cloud/storage/resource/StorageProcessor.java
##########
@@ -33,6 +33,10 @@
 import com.cloud.agent.api.Answer;
 
 public interface StorageProcessor {
+
+    String REQUEST_TEMPLATE_RELOAD = "request template reload";
+    String COPY_NOT_NEEDED_FOR_DEPLOY_AS_IS = "copy volume not needed for deploy as is";

Review comment:
       @nvazquez where and how is this used?

##########
File path: core/src/main/java/com/cloud/storage/template/OVAProcessor.java
##########
@@ -53,73 +64,142 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa
 
     @Override
     public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException {
-        if (format != null) {
-            if (s_logger.isInfoEnabled()) {
-                s_logger.info("We currently don't handle conversion from " + format + " to OVA.");
-            }
+        if (! conversionChecks(format)){
             return null;
         }
 
-        s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName);
+        LOGGER.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName);
         String templateFilePath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension();
         if (!_storage.exists(templateFilePath)) {
-            if (s_logger.isInfoEnabled()) {
-                s_logger.info("Unable to find the vmware template file: " + templateFilePath);
+            if (LOGGER.isInfoEnabled()) {
+                LOGGER.info("Unable to find the vmware template file: " + templateFilePath);
             }
             return null;
         }
 
-        s_logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName);
-        String templateFileFullPath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension();
-        File templateFile = new File(templateFileFullPath);
-        Script command = new Script("tar", processTimeout, s_logger);
-        command.add("--no-same-owner");
-        command.add("--no-same-permissions");
-        command.add("-xf", templateFileFullPath);
-        command.setWorkDir(templateFile.getParent());
-        String result = command.execute();
-        if (result != null) {
-            s_logger.info("failed to untar OVA package due to " + result + ". templatePath: " + templatePath + ", templateName: " + templateName);
-            throw new InternalErrorException("failed to untar OVA package");
+        String templateFileFullPath = unpackOva(templatePath, templateName, processTimeout);
+
+        setFileSystemAccessRights(templatePath);
+
+        FormatInfo info = createFormatInfo(templatePath, templateName, templateFilePath, templateFileFullPath);
+
+        // The intention is to use the ova file as is for deployment and use processing result only for
+        // - property assessment and
+        // - reconsiliation of
+        // - - disks,
+        // - - networks and
+        // - - compute dimensions.

Review comment:
       @nvazquez check and remove comments

##########
File path: 
engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
##########
@@ -745,87 +749,169 @@ public void doInTransactionWithoutResult(final TransactionStatus status) {
     @Override
     @DB
     public void allocate(final VirtualMachineProfile vm, final LinkedHashMap<? extends Network, List<? extends NicProfile>> networks, final Map<String, Map<Integer, String>> extraDhcpOptions) throws InsufficientCapacityException,
-    ConcurrentOperationException {
+            ConcurrentOperationException {
 
         Transaction.execute(new TransactionCallbackWithExceptionNoReturn<InsufficientCapacityException>() {
             @Override
             public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException {
-                int deviceId = 0;
-                int size = 0;
-                for (final Network ntwk : networks.keySet()) {
-                    final List<? extends NicProfile> profiles = networks.get(ntwk);
-                    if (profiles != null && !profiles.isEmpty()) {
-                        size = size + profiles.size();
-                    } else {
-                        size = size + 1;
-                    }
+                if (s_logger.isTraceEnabled()) {
+                    s_logger.trace(String.format("allocating networks for %s(template %s); %d networks",vm.getInstanceName(), vm.getTemplate().getUuid(), networks.size()));
                 }
+                int deviceId = 0;
+                int size;
+                size = determineNumberOfNicsRequired();
 
                 final boolean[] deviceIds = new boolean[size];
                 Arrays.fill(deviceIds, false);
 
+                List<Pair<Network, NicProfile>> profilesList = getOrderedNetworkNicProfileMapping(networks);
                 final List<NicProfile> nics = new ArrayList<NicProfile>(size);
                 NicProfile defaultNic = null;
+                Network nextNetwork = null;
+                for (Pair <Network, NicProfile> networkNicPair : profilesList) {
+                    nextNetwork = networkNicPair.first();
+                    Pair<NicProfile, Integer> newDeviceInfo = addRequestedNicToNicListWithDeviceNumberAndRetrieveDefaultDevice(networkNicPair.second(), deviceIds, deviceId, nextNetwork, nics, defaultNic);
+                    defaultNic = newDeviceInfo.first();
+                    deviceId = newDeviceInfo.second();
+                }
+                createExtraNics(size, nics, nextNetwork);
+
+                if (nics.size() == 1) {
+                    nics.get(0).setDefaultNic(true);
+                }
+            }
 
+            /**
+             * private transaction method to check and add devices to the nic list and update the info
+             */
+            Pair<NicProfile,Integer> addRequestedNicToNicListWithDeviceNumberAndRetrieveDefaultDevice(NicProfile requested, boolean[] deviceIds, int deviceId, Network nextNetwork, List<NicProfile> nics, NicProfile defaultNic)
+                    throws InsufficientAddressCapacityException, InsufficientVirtualNetworkCapacityException {
+                Pair<NicProfile, Integer> rc = new Pair<>(null,null);
+                Boolean isDefaultNic = false;
+                if (vm != null && requested != null && requested.isDefaultNic()) {
+                    isDefaultNic = true;
+                }
+
+                while (deviceIds[deviceId] && deviceId < deviceIds.length) {
+                    deviceId++;
+                }
+
+                final Pair<NicProfile, Integer> vmNicPair = allocateNic(requested, nextNetwork, isDefaultNic, deviceId, vm);
+                NicProfile vmNic = null;
+                if (vmNicPair != null) {
+                    vmNic = vmNicPair.first();
+                    if (vmNic == null) {
+                        return rc;
+                    }
+                    deviceId = vmNicPair.second();
+                }
+
+                final int devId = vmNic.getDeviceId();
+                if (devId >= deviceIds.length) {
+                    throw new IllegalArgumentException("Device id for nic is too large: " + vmNic);
+                }
+                if (deviceIds[devId]) {
+                    throw new IllegalArgumentException("Conflicting device id for two different nics: " + vmNic);
+                }
+
+                deviceIds[devId] = true;
+
+                if (vmNic.isDefaultNic()) {
+                    if (defaultNic != null) {
+                        throw new IllegalArgumentException("You cannot specify two nics as default nics: nic 1 = " + defaultNic + "; nic 2 = " + vmNic);
+                    }
+                    defaultNic = vmNic;
+                }
+
+                nics.add(vmNic);
+                vm.addNic(vmNic);
+                saveExtraDhcpOptions(nextNetwork.getUuid(), vmNic.getId(), extraDhcpOptions);
+                rc.first(defaultNic);
+                rc.second(deviceId);
+                return rc;
+            }
+
+            /**
+             * private transaction method to get oredered list of Network and NicProfile pair
+             * @return ordered list of Network and NicProfile pair
+             * @param networks the map od networks to nic profiles list
+             */
+            private List<Pair<Network, NicProfile>> getOrderedNetworkNicProfileMapping(final LinkedHashMap<? extends Network, List<? extends NicProfile>> networks) {
+                List<Pair<Network, NicProfile>> profilesList = new ArrayList<>();
                 for (final Map.Entry<? extends Network, List<? extends NicProfile>> network : networks.entrySet()) {
-                    final Network config = network.getKey();
                     List<? extends NicProfile> requestedProfiles = network.getValue();
                     if (requestedProfiles == null) {
                         requestedProfiles = new ArrayList<NicProfile>();
                     }
                     if (requestedProfiles.isEmpty()) {
                         requestedProfiles.add(null);
                     }
-
                     for (final NicProfile requested : requestedProfiles) {
-                        Boolean isDefaultNic = false;
-                        if (vm != null && requested != null && requested.isDefaultNic()) {
-                            isDefaultNic = true;
-                        }
-
-                        while (deviceIds[deviceId] && deviceId < deviceIds.length) {
-                            deviceId++;
-                        }
-
-                        final Pair<NicProfile, Integer> vmNicPair = allocateNic(requested, config, isDefaultNic, deviceId, vm);
-                        NicProfile vmNic = null;
-                        if (vmNicPair != null) {
-                            vmNic = vmNicPair.first();
-                            if (vmNic == null) {
-                                continue;
-                            }
-                            deviceId = vmNicPair.second();
-                        }
-
-                        final int devId = vmNic.getDeviceId();
-                        if (devId >= deviceIds.length) {
-                            throw new IllegalArgumentException("Device id for nic is too large: " + vmNic);
-                        }
-                        if (deviceIds[devId]) {
-                            throw new IllegalArgumentException("Conflicting device id for two different nics: " + vmNic);
+                        profilesList.add(new Pair<Network, NicProfile>(network.getKey(), requested));
+                    }
+                }
+                profilesList.sort(new Comparator<Pair<Network, NicProfile>>() {
+                    @Override
+                    public int compare(Pair<Network, NicProfile> pair1, Pair<Network, NicProfile> pair2) {
+                        int profile1Order = Integer.MAX_VALUE;
+                        int profile2Order = Integer.MAX_VALUE;
+                        if (pair1 != null && pair1.second() != null && pair1.second().getOrderIndex() != null) {
+                            profile1Order = pair1.second().getOrderIndex();
                         }
-
-                        deviceIds[devId] = true;
-
-                        if (vmNic.isDefaultNic()) {
-                            if (defaultNic != null) {
-                                throw new IllegalArgumentException("You cannot 
specify two nics as default nics: nic 1 = " + defaultNic + "; nic 2 = " + 
vmNic);
-                            }
-                            defaultNic = vmNic;
+                        if (pair2 != null && pair2.second() != null && 
pair2.second().getOrderIndex() != null) {
+                            profile2Order = pair2.second().getOrderIndex();
                         }
+                        return profile1Order - profile2Order;
+                    }
+                });
+                return profilesList;
+            }
 
-                        nics.add(vmNic);
-                        vm.addNic(vmNic);
-                        saveExtraDhcpOptions(config.getUuid(), vmNic.getId(), extraDhcpOptions);
+            /**
+             * private transaction method to run over the objects and determine nic requirements
+             * @return the total numer of nics required
+             */
+            private int determineNumberOfNicsRequired() {
+                int size = 0;
+                for (final Network ntwk : networks.keySet()) {
+                    final List<? extends NicProfile> profiles = networks.get(ntwk);
+                    if (profiles != null && !profiles.isEmpty()) {
+                        size = size + profiles.size();
+                    } else {
+                        size = size + 1;
                     }
                 }
+
+                List<NetworkPrerequisiteTO> netprereqs = templateDetailsDao.listNetworkRequirementsByTemplateId(vm.getTemplate().getId());
+                // hack: add last network untill enough ids

Review comment:
       @nvazquez check and improve or remove the comment

##########
File path: core/src/main/java/com/cloud/resource/ServerResource.java
##########
@@ -31,6 +31,9 @@
  * ServerResource is a generic container to execute commands sent
  */
 public interface ServerResource extends Manager {
+
+    String ORIGINAL_FILE_EXTENSION = ".orig";

Review comment:
       @nvazquez where and how is this used?

##########
File path: api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
##########
@@ -260,7 +260,8 @@
     public static final String OUTOFBANDMANAGEMENT_POWERSTATE = "outofbandmanagementpowerstate";
     public static final String OUTOFBANDMANAGEMENT_ENABLED = "outofbandmanagementenabled";
     public static final String OUTPUT = "output";
-    public static final String OVF_PROPERTIES = "ovfproperties";
+    public static final String PROPERTIES = "properties";
+    public static final String ACS_PROPERTY = "ACS-property";

Review comment:
       Is ACS_PROPERTY used by any API response?

##########
File path: 
api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java
##########
@@ -162,6 +162,13 @@
                 description = "true if template should bypass Secondary Storage and be downloaded to Primary Storage on deployment")
     private Boolean directDownload;
 
+    @Parameter(name= ApiConstants.DEPLOY_AS_IS,
+            type = CommandType.BOOLEAN,
+            description = "true if template should not strip and define disks and networks but leave those to the template definition",

Review comment:
       Should we mention that this is for VMware/OVA only?
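       For example, the description could be worded something like this (just a suggestion):

```java
// sketch: make the hypervisor scope explicit in the parameter description
description = "VMware/OVA only: true if the template should not have its disks and networks " +
        "stripped and redefined on registration, but be deployed as defined in the template",
```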

##########
File path: 
api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java
##########
@@ -189,18 +191,28 @@
     @Param(description = "KVM Only: true if template is directly downloaded to Primary Storage bypassing Secondary Storage")
     private Boolean directDownload;
 
+    @SerializedName(ApiConstants.DEPLOY_AS_IS)
+    @Param(description = "VMware only: true if template is deployed without orchestrating disks and networks but \"as-is\" defined in the template.")
+    private Boolean deployAsIs;
+
     @SerializedName("parenttemplateid")
     @Param(description = "if Datadisk template, then id of the root disk template this template belongs to")
+    @Deprecated(since = "4.15")
     private String parentTemplateId;
 
     @SerializedName("childtemplates")
     @Param(description = "if root disk template, then ids of the datas disk templates this template owns")
+    @Deprecated(since = "4.15")
     private Set<ChildTemplateResponse> childTemplates;

Review comment:
       @nvazquez did we get rid of the child-template design from the old multi-disk OVA code? Can this be removed?

##########
File path: 
plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
##########
@@ -1904,7 +1924,22 @@ protected StartAnswer execute(StartCommand cmd) {
                 }
             }
 
+            // The number of disks changed must be 0 for install as is, as the VM is a clone
+            int disksChanges = !installAsIs ? disks.length : 0;
             int totalChangeDevices = disks.length + nics.length;
+            int hackDeviceCount = 0;
+            if (diskInfoBuilder != null) {
+                hackDeviceCount += diskInfoBuilder.getDiskCount();
+            }
+            hackDeviceCount += nicDevices == null ? 0 : nicDevices.length;
+            // vApp cdrom device
+            // HACK ALERT: ovf properties might not be the only or defining feature of vApps; needs checking

Review comment:
       @nvazquez this is not necessary if we're cloning a deploy-as-is template; there is no need for the hack that adds another cdrom device. Please check and remove this code.
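       If it can't be dropped outright yet, it should at least be skipped for deploy-as-is clones, e.g. (rough sketch, untested; 'hasOvfProperties' is just a placeholder for whatever check is used to detect vApp/OVF properties):

```java
// sketch: only count the extra vApp cdrom device when we are NOT cloning a deploy-as-is template
if (!installAsIs && hasOvfProperties) {
    hackDeviceCount++;
}
```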




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

