[ https://issues.apache.org/jira/browse/CLOUDSTACK-4757?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16302691#comment-16302691 ]

ASF GitHub Bot commented on CLOUDSTACK-4757:
--------------------------------------------

nvazquez commented on a change in pull request #2146: CLOUDSTACK-4757: Support 
OVA files with multiple disks for templates
URL: https://github.com/apache/cloudstack/pull/2146#discussion_r158594539
 
 

 ##########
 File path: api/src/com/cloud/agent/api/storage/OVFHelper.java
 ##########
 @@ -0,0 +1,333 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+
+import org.apache.log4j.Logger;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
+
+import com.cloud.agent.api.to.DatadiskTO;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class OVFHelper {
+    private static final Logger s_logger = Logger.getLogger(OVFHelper.class);
+
+    public List<DatadiskTO> getOVFVolumeInfo(final String ovfFilePath) {
+        if (ovfFilePath == null || ovfFilePath.isEmpty()) {
+            return null;
+        }
+        ArrayList<OVFFile> vf = new ArrayList<OVFFile>();
+        ArrayList<OVFDisk> vd = new ArrayList<OVFDisk>();
+
+        File ovfFile = new File(ovfFilePath);
+        try {
+            final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath));
+            NodeList disks = doc.getElementsByTagName("Disk");
+            NodeList files = doc.getElementsByTagName("File");
+            NodeList items = doc.getElementsByTagName("Item");
+            boolean toggle = true;
+            for (int j = 0; j < files.getLength(); j++) {
+                Element file = (Element)files.item(j);
+                OVFFile of = new OVFFile();
+                of._href = file.getAttribute("ovf:href");
+                if (of._href.endsWith("vmdk") || of._href.endsWith("iso")) {
+                    s_logger.info("MDOVA getOVFVolumeInfo File href = " + of._href);
+                    of._id = file.getAttribute("ovf:id");
+                    s_logger.info("MDOVA getOVFVolumeInfo File Id = " + of._id);
+                    String size = file.getAttribute("ovf:size");
+                    if (size != null && !size.isEmpty()) {
+                        of._size = Long.parseLong(size);
+                    } else {
+                        String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
+                        File this_file = new File(dataDiskPath);
+                        of._size = this_file.length();
+                    }
+                    of._iso = of._href.endsWith("iso");
+                    if (toggle && !of._iso) {
+                        of._bootable = true;
+                        toggle = !toggle;
+                    }
+                    vf.add(of);
+                }
+            }
+            for (int i = 0; i < disks.getLength(); i++) {
+                Element disk = (Element)disks.item(i);
+                OVFDisk od = new OVFDisk();
+                String virtualSize = disk.getAttribute("ovf:capacity");
+                if (virtualSize == null || virtualSize.isEmpty()) {
+                    od._capacity = 0L;
+                } else {
+                    od._capacity = Long.parseLong(virtualSize);
+                }
+                String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
+                od._diskId = disk.getAttribute("ovf:diskId");
+                s_logger.info("MDOVA getOVFVolumeInfo Disk ovf:diskId  = " + od._diskId);
+                od._fileRef = disk.getAttribute("ovf:fileRef");
+                s_logger.info("MDOVA getOVFVolumeInfo Disk ovf:fileRef  = " + od._fileRef);
+                String populatedSize = disk.getAttribute("ovf:populatedSize");
+                od._populatedSize = (populatedSize == null || populatedSize.isEmpty()) ? 0L : Long.parseLong(populatedSize);
+                s_logger.info("MDOVA getOVFVolumeInfo Disk _populatedSize  = " + od._populatedSize);
+
+                if ((od._capacity != 0) && (allocationUnits != null)) {
+
+                    long units = 1;
+                    if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
+                        units = 1024;
+                    } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
+                        units = 1024 * 1024;
+                    } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
+                        units = 1024 * 1024 * 1024;
+                    }
+                    od._capacity = od._capacity * units;
+                    s_logger.info("MDOVA getOVFVolumeInfo Disk _capacity  = " + od._capacity);
+                }
+                od._controller = getControllerType(items, od._diskId);
+                vd.add(od);
+            }
+
+        } catch (SAXException | IOException | ParserConfigurationException e) {
+            s_logger.error("Unexpected exception caught while parsing ovf file:" + ovfFilePath, e);
+            throw new CloudRuntimeException(e);
+        }
+
+        List<DatadiskTO> disksTO = new ArrayList<DatadiskTO>();
+        for (OVFFile of : vf) {
+            if (of._id == null || of._id.isEmpty()){
+                s_logger.error("The ovf file info has incomplete file info");
+                throw new CloudRuntimeException("The ovf file info has incomplete file info");
+            }
+            OVFDisk cdisk = getDisk(of._id, vd);
+            if (cdisk == null && !of._iso){
+                s_logger.error("The ovf file info has incomplete disk info");
+                throw new CloudRuntimeException("The ovf file info has incomplete disk info");
+            }
+            Long capacity = cdisk == null ? of._size : cdisk._capacity;
+            String controller = cdisk == null ? "" : cdisk._controller._name;
+            String controllerSubType = cdisk == null ? "" : cdisk._controller._subType;
+            String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
+            s_logger.info("MDOVA getOVFVolumeInfo diskName = " + of._href + ", dataDiskPath = " + dataDiskPath);
+            File f = new File(dataDiskPath);
+            if (!f.exists() || f.isDirectory()) {
+                s_logger.error("One of the attached disks or ISOs does not exist: " + dataDiskPath);
+                throw new CloudRuntimeException("One of the attached disks or ISOs listed in the OVF does not exist: " + dataDiskPath);
+            }
+            disksTO.add(new DatadiskTO(dataDiskPath, capacity, of._size, of._id, of._iso, of._bootable, controller, controllerSubType));
+        }
+        //if the first disk is an ISO, move it to the end
+        if (!disksTO.isEmpty()) {
+            DatadiskTO fd = disksTO.get(0);
+            if (fd.isIso()) {
+                disksTO.remove(0);
+                disksTO.add(fd);
+            }
+        }
+        return disksTO;
+    }
+
+    private OVFDiskController getControllerType(final NodeList itemList, final String diskId) {
 
 Review comment:
   I agree, I'll refactor this soon
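
   Whatever the refactor ends up targeting, one illustrative possibility (not part of this pull request; the helper name getAllocationUnitMultiplier is hypothetical) is to pull the repeated equalsIgnoreCase chain that maps ovf:capacityAllocationUnits to a byte multiplier into a small helper, so getOVFVolumeInfo only multiplies:

       // Hypothetical helper, not the merged code: maps an OVF
       // ovf:capacityAllocationUnits value to a byte multiplier.
       private static long getAllocationUnitMultiplier(final String allocationUnits) {
           if (allocationUnits == null) {
               return 1L;
           }
           if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes")
                   || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
               return 1024L;
           }
           if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes")
                   || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
               return 1024L * 1024L;
           }
           if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes")
                   || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
               return 1024L * 1024L * 1024L;
           }
           return 1L;
       }

   The capacity computation above would then reduce to:
   od._capacity = od._capacity * getAllocationUnitMultiplier(allocationUnits);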

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


> Support OVA files with multiple disks for templates
> ---------------------------------------------------
>
>                 Key: CLOUDSTACK-4757
>                 URL: https://issues.apache.org/jira/browse/CLOUDSTACK-4757
>             Project: CloudStack
>          Issue Type: New Feature
>      Security Level: Public (Anyone can view this level - this is the default.)
>          Components: Storage Controller
>            Reporter: Likitha Shetty
>            Assignee: Nicolas Vazquez
>            Priority: Minor
>             Fix For: Future
>
>
> CloudStack volumes and templates are a single virtual disk in the case of 
> XenServer/XCP and KVM hypervisors, since the files used for templates and 
> volumes are virtual disks (VHD, QCOW2). However, VMware volumes and templates 
> are in OVA format, which is an archive that can contain a complete VM, 
> including multiple VMDKs and other files such as ISOs. Currently, CloudStack 
> only supports template creation based on OVA files containing a single disk. 
> If a user creates a template from an OVA file containing more than one disk 
> and launches an instance using this template, only the first disk is attached 
> to the new instance and the other disks are ignored.
> Similarly for uploaded volumes, attaching an uploaded volume that contains 
> multiple disks to a VM will result in only one VMDK being attached to the VM.
> This behavior needs to be improved in VMware to support OVA files with 
> multiple disks for both uploaded volumes and templates, i.e. if a user 
> creates a template from an OVA file containing more than one disk and 
> launches an instance using this template, the first disk should be attached 
> to the new instance as the ROOT disk, and volumes should be created from the 
> other VMDK disks in the OVA file and attached to the instance.
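
To make the intended behavior concrete, here is a minimal, hypothetical sketch of how a caller could consume OVFHelper.getOVFVolumeInfo() from the diff above to split a multi-disk OVA into a ROOT disk, data disks, and trailing ISOs. It assumes DatadiskTO exposes accessors such as isBootable() and getPath() in addition to the isIso() shown in the diff; those names and the file path are illustrative assumptions, not confirmed API.

    import java.util.List;

    import com.cloud.agent.api.storage.OVFHelper;
    import com.cloud.agent.api.to.DatadiskTO;

    public class MultiDiskOvaExample {
        public static void main(String[] args) {
            // Parse the .ovf descriptor of an already-extracted OVA (path is illustrative).
            List<DatadiskTO> disks = new OVFHelper().getOVFVolumeInfo("/tmp/template/template.ovf");
            for (DatadiskTO disk : disks) {
                if (disk.isIso()) {
                    // ISOs are moved to the end of the list by getOVFVolumeInfo().
                    System.out.println("ISO: " + disk.getPath());
                } else if (disk.isBootable()) {
                    // The first non-ISO file is flagged bootable and would back the ROOT disk.
                    System.out.println("ROOT disk: " + disk.getPath());
                } else {
                    // Remaining VMDKs would become data volumes attached to the same instance.
                    System.out.println("Data disk: " + disk.getPath());
                }
            }
        }
    }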



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)
