nvazquez commented on code in PR #12563:
URL: https://github.com/apache/cloudstack/pull/12563#discussion_r2944074805


##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java:
##########
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.utils;
+
+import com.cloud.storage.ScopeType;
+import com.cloud.utils.StringUtils;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.springframework.util.Base64Utils;
+
+import java.nio.charset.StandardCharsets;
+import java.util.Map;
+
+public class Utility {

Review Comment:
   Maybe rename this to `OntapStorageUtils`?



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java:
##########
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.utils;
+
+
+public class Constants {

Review Comment:
   Maybe rename this to `OntapStorageConstants`? Also, some of these constants may already be
available in the `ApiConstants` class.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java:
##########
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+        package org.apache.cloudstack.storage.service;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public abstract class StorageStrategy {
+    private final FeignClientFactory feignClientFactory;
+    private final AggregateFeignClient aggregateFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final SvmFeignClient svmFeignClient;
+    private final JobFeignClient jobFeignClient;
+    private final NetworkFeignClient networkFeignClient;
+    private final SANFeignClient sanFeignClient;
+
+    protected OntapStorage storage;
+
+    private List<Aggregate> aggregates;
+
+    private static final Logger s_logger = 
LogManager.getLogger(StorageStrategy.class);
+
+    public StorageStrategy(OntapStorage ontapStorage) {
+        storage = ontapStorage;
+        String baseURL = Constants.HTTPS + storage.getManagementLIF();
+        s_logger.info("Initializing StorageStrategy with base URL: " + 
baseURL);
+        this.feignClientFactory = new FeignClientFactory();
+        this.aggregateFeignClient = 
feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
+        this.volumeFeignClient = 
feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
+        this.svmFeignClient = 
feignClientFactory.createClient(SvmFeignClient.class, baseURL);
+        this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL);
+        this.networkFeignClient = 
feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
+        this.sanFeignClient = 
feignClientFactory.createClient(SANFeignClient.class, baseURL);
+    }
+
+    public boolean connect() {
+        s_logger.info("Attempting to connect to ONTAP cluster at " + 
storage.getManagementLIF() + " and validate SVM " +
+                storage.getSvmName() + ", protocol " + storage.getProtocol());
+        String authHeader = Utility.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+        String svmName = storage.getSvmName();
+        try {
+            Svm svm = new Svm();
+            s_logger.info("Fetching the SVM details...");
+            Map<String, Object> queryParams = Map.of(Constants.NAME, svmName, 
Constants.FIELDS, Constants.AGGREGATES +
+                    Constants.COMMA + Constants.STATE);
+            OntapResponse<Svm> svms = 
svmFeignClient.getSvmResponse(queryParams, authHeader);
+            if (svms != null && svms.getRecords() != null && 
!svms.getRecords().isEmpty()) {
+                svm = svms.getRecords().get(0);
+            } else {
+                s_logger.error("No SVM found on the ONTAP cluster by the name" 
+ svmName + ".");
+                return false;
+            }
+
+            s_logger.info("Validating SVM state and protocol settings...");
+            if (!Objects.equals(svm.getState(), Constants.RUNNING)) {
+                s_logger.error("SVM " + svmName + " is not in running state.");
+                return false;
+            }
+            if (Objects.equals(storage.getProtocol(), Constants.NFS) && 
!svm.getNfsEnabled()) {
+                s_logger.error("NFS protocol is not enabled on SVM " + 
svmName);
+                return false;
+            } else if (Objects.equals(storage.getProtocol(), Constants.ISCSI) 
&& !svm.getIscsiEnabled()) {
+                s_logger.error("iSCSI protocol is not enabled on SVM " + 
svmName);
+                return false;
+            }
+            List<Aggregate> aggrs = svm.getAggregates();
+            if (aggrs == null || aggrs.isEmpty()) {
+                s_logger.error("No aggregates are assigned to SVM " + svmName);
+                return false;
+            }
+            for (Aggregate aggr : aggrs) {
+                s_logger.debug("Found aggregate: " + aggr.getName() + " with 
UUID: " + aggr.getUuid());
+                Aggregate aggrResp = 
aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());
+                if (!Objects.equals(aggrResp.getState(), 
Aggregate.StateEnum.ONLINE)) {
+                    s_logger.warn("Aggregate " + aggr.getName() + " is not in 
online state. Skipping this aggregate.");
+                    continue;
+                } else if (aggrResp.getSpace() == null || 
aggrResp.getAvailableBlockStorageSpace() == null ||
+                        aggrResp.getAvailableBlockStorageSpace() <= 
storage.getSize().doubleValue()) {
+                    s_logger.warn("Aggregate " + aggr.getName() + " does not 
have sufficient available space. Skipping this aggregate.");
+                    continue;
+                }
+                s_logger.info("Selected aggregate: " + aggr.getName() + " for 
volume operations.");
+                this.aggregates = List.of(aggr);
+                break;
+            }
+            if (this.aggregates == null || this.aggregates.isEmpty()) {
+                s_logger.error("No suitable aggregates found on SVM " + 
svmName + " for volume creation.");
+                return false;
+            }
+
+            s_logger.info("Successfully connected to ONTAP cluster and 
validated ONTAP details provided");
+        } catch (Exception e) {
+            s_logger.error("Failed to connect to ONTAP cluster: " + 
e.getMessage(), e);
+            return false;
+        }
+        return true;
+    }
+
+    public Volume createStorageVolume(String volumeName, Long size) {
+        s_logger.info("Creating volume: " + volumeName + " of size: " + size + 
" bytes");
+
+        String svmName = storage.getSvmName();
+        if (aggregates == null || aggregates.isEmpty()) {
+            s_logger.error("No aggregates available to create volume on SVM " 
+ svmName);
+            throw new CloudRuntimeException("No aggregates available to create 
volume on SVM " + svmName);
+        }
+        if (size == null || size <= 0) {
+            throw new CloudRuntimeException("Invalid volume size provided: " + 
size);
+        }
+
+        String authHeader = Utility.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+
+        Volume volumeRequest = new Volume();
+        Svm svm = new Svm();
+        svm.setName(svmName);
+        Nas nas = new Nas();
+        nas.setPath(Constants.SLASH + volumeName);
+
+        volumeRequest.setName(volumeName);
+        volumeRequest.setSvm(svm);
+
+        long maxAvailableAggregateSpaceBytes = -1L;
+        Aggregate aggrChosen = null;
+        for (Aggregate aggr : aggregates) {
+            s_logger.debug("Found aggregate: " + aggr.getName() + " with UUID: 
" + aggr.getUuid());
+            Aggregate aggrResp = 
aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());
+
+            if (aggrResp == null) {
+                s_logger.warn("Aggregate details response is null for 
aggregate " + aggr.getName() + ". Skipping.");
+                continue;
+            }
+
+            if (!Objects.equals(aggrResp.getState(), 
Aggregate.StateEnum.ONLINE)) {
+                s_logger.warn("Aggregate " + aggr.getName() + " is not in 
online state. Skipping this aggregate.");
+                continue;
+            }
+
+            if (aggrResp.getSpace() == null || 
aggrResp.getAvailableBlockStorageSpace() == null) {
+                s_logger.warn("Aggregate " + aggr.getName() + " does not have 
space information. Skipping this aggregate.");
+                continue;
+            }
+
+            final long availableBytes = 
aggrResp.getAvailableBlockStorageSpace().longValue();
+            s_logger.debug("Aggregate " + aggr.getName() + " available bytes=" 
+ availableBytes + ", requested=" + size);
+
+            if (availableBytes <= size) {
+                s_logger.warn("Aggregate " + aggr.getName() + " does not have 
sufficient available space. Required=" +
+                        size + " bytes, available=" + availableBytes + " 
bytes. Skipping this aggregate.");
+                continue;
+            }
+
+            if (availableBytes > maxAvailableAggregateSpaceBytes) {
+                maxAvailableAggregateSpaceBytes = availableBytes;
+                aggrChosen = aggr;
+            }
+        }
+
+        if (aggrChosen == null) {
+            s_logger.error("No suitable aggregates found on SVM " + svmName + 
" for volume creation.");
+            throw new CloudRuntimeException("No suitable aggregates found on 
SVM " + svmName + " for volume operations.");
+        }
+        s_logger.info("Selected aggregate: " + aggrChosen.getName() + " for 
volume operations.");
+
+        Aggregate aggr = new Aggregate();
+        aggr.setName(aggrChosen.getName());
+        aggr.setUuid(aggrChosen.getUuid());
+        volumeRequest.setAggregates(List.of(aggr));
+        volumeRequest.setSize(size);
+        volumeRequest.setNas(nas);
+        try {
+            JobResponse jobResponse = 
volumeFeignClient.createVolumeWithJob(authHeader, volumeRequest);
+            if (jobResponse == null || jobResponse.getJob() == null) {
+                throw new CloudRuntimeException("Failed to initiate volume 
creation for " + volumeName);
+            }
+            String jobUUID = jobResponse.getJob().getUuid();
+
+            Boolean jobSucceeded = jobPollForSuccess(jobUUID);
+            if (!jobSucceeded) {
+                s_logger.error("Volume creation job failed for volume: " + 
volumeName);
+                throw new CloudRuntimeException("Volume creation job failed 
for volume: " + volumeName);
+            }
+            s_logger.info("Volume creation job completed successfully for 
volume: " + volumeName);
+        } catch (Exception e) {
+            s_logger.error("Exception while creating volume: ", e);
+            throw new CloudRuntimeException("Failed to create volume: " + 
e.getMessage());
+        }
+        OntapResponse<Volume> volumesResponse = 
volumeFeignClient.getAllVolumes(authHeader, Map.of(Constants.NAME, volumeName));
+        if (volumesResponse == null || volumesResponse.getRecords() == null || 
volumesResponse.getRecords().isEmpty()) {
+            s_logger.error("Volume " + volumeName + " not found after 
creation.");
+            throw new CloudRuntimeException("Volume " + volumeName + " not 
found after creation.");
+        }
+        Volume createdVolume = volumesResponse.getRecords().get(0);
+        if (createdVolume == null) {
+            s_logger.error("Failed to retrieve details of the created volume " 
+ volumeName);
+            throw new CloudRuntimeException("Failed to retrieve details of the 
created volume " + volumeName);
+        } else if (createdVolume.getName() == null || 
!createdVolume.getName().equals(volumeName)) {
+            s_logger.error("Mismatch in created volume name. Expected: " + 
volumeName + ", Found: " + createdVolume.getName());
+            throw new CloudRuntimeException("Mismatch in created volume name. 
Expected: " + volumeName + ", Found: " + createdVolume.getName());
+        }
+        s_logger.info("Volume created successfully: " + volumeName);
+        try {
+            Map<String, Object> queryParams = Map.of(Constants.NAME, 
volumeName);
+            s_logger.debug("Fetching volume details for: " + volumeName);
+
+            OntapResponse<Volume> ontapVolume = 
volumeFeignClient.getVolume(authHeader, queryParams);
+            s_logger.debug("Feign call completed. Processing response...");
+
+            if (ontapVolume == null) {
+                s_logger.error("OntapResponse is null for volume: " + 
volumeName);
+                throw new CloudRuntimeException("Failed to fetch volume " + 
volumeName + ": Response is null");
+            }
+            s_logger.debug("OntapResponse is not null. Checking records 
field...");
+
+            if (ontapVolume.getRecords() == null) {
+                s_logger.error("OntapResponse.records is null for volume: " + 
volumeName);
+                throw new CloudRuntimeException("Failed to fetch volume " + 
volumeName + ": Records list is null");
+            }
+            s_logger.debug("Records field is not null. Size: " + 
ontapVolume.getRecords().size());
+
+            if (ontapVolume.getRecords().isEmpty()) {
+                s_logger.error("OntapResponse.records is empty for volume: " + 
volumeName);
+                throw new CloudRuntimeException("Failed to fetch volume " + 
volumeName + ": No records found");
+            }
+
+            Volume volume = ontapVolume.getRecords().get(0);
+            s_logger.info("Volume retrieved successfully: " + volumeName + ", 
UUID: " + volume.getUuid());
+            return volume;
+        } catch (Exception e) {
+            s_logger.error("Exception while retrieving volume details for: " + 
volumeName, e);
+            throw new CloudRuntimeException("Failed to fetch volume: " + 
volumeName + ". Error: " + e.getMessage(), e);
+        }
+    }
+
+    public Volume updateStorageVolume(Volume volume) {
+        return null;
+    }
+
+    public void deleteStorageVolume(Volume volume) {
+        s_logger.info("Deleting ONTAP volume by name: " + volume.getName() + " 
and uuid: " + volume.getUuid());
+        String authHeader = Utility.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+        try {
+            JobResponse jobResponse = 
volumeFeignClient.deleteVolume(authHeader, volume.getUuid());
+            Boolean jobSucceeded = 
jobPollForSuccess(jobResponse.getJob().getUuid());
+            if (!jobSucceeded) {
+                s_logger.error("Volume deletion job failed for volume: " + 
volume.getName());
+                throw new CloudRuntimeException("Volume deletion job failed 
for volume: " + volume.getName());
+            }
+            s_logger.info("Volume deleted successfully: " + volume.getName());
+        } catch (FeignException.FeignClientException e) {
+            s_logger.error("Exception while deleting volume: ", e);
+            throw new CloudRuntimeException("Failed to delete volume: " + 
e.getMessage());
+        }
+        s_logger.info("ONTAP volume deletion process completed for volume: " + 
volume.getName());
+    }
+
+    public Volume getStorageVolume(Volume volume) {
+        return null;
+    }
+
+    public String getStoragePath() {
+        String authHeader = Utility.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+        String targetIqn = null;
+        try {
+            if (storage.getProtocol() == ProtocolType.ISCSI) {
+                s_logger.info("Fetching iSCSI target IQN for SVM: {}", 
storage.getSvmName());
+
+                Map<String, Object> queryParams = new HashMap<>();
+                queryParams.put(Constants.SVM_DOT_NAME, storage.getSvmName());
+                queryParams.put("fields", "enabled,target");
+                queryParams.put("max_records", "1");
+
+                OntapResponse<IscsiService> response = 
sanFeignClient.getIscsiServices(authHeader, queryParams);
+
+                if (response == null || response.getRecords() == null || 
response.getRecords().isEmpty()) {
+                    throw new CloudRuntimeException("No iSCSI service found 
for SVM: " + storage.getSvmName());
+                }
+
+                IscsiService iscsiService = response.getRecords().get(0);
+
+                if (iscsiService.getTarget() == null || 
iscsiService.getTarget().getName() == null) {
+                    throw new CloudRuntimeException("iSCSI target IQN not 
found for SVM: " + storage.getSvmName());
+                }
+
+                targetIqn = iscsiService.getTarget().getName();
+                s_logger.info("Retrieved iSCSI target IQN: {}", targetIqn);
+                return targetIqn;
+
+            } else if (storage.getProtocol() == ProtocolType.NFS3) {
+            } else {
+                throw new CloudRuntimeException("Unsupported protocol for path 
retrieval: " + storage.getProtocol());
+            }
+
+        } catch (FeignException.FeignClientException e) {
+            s_logger.error("Exception while retrieving storage path for 
protocol {}: {}", storage.getProtocol(), e.getMessage(), e);
+            throw new CloudRuntimeException("Failed to retrieve storage path: 
" + e.getMessage());
+        }
+        return targetIqn;
+    }
+
+    public String getNetworkInterface() {
+        String authHeader = Utility.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+        try {
+            Map<String, Object> queryParams = new HashMap<>();
+            queryParams.put(Constants.SVM_DOT_NAME, storage.getSvmName());
+            if (storage.getProtocol() != null) {
+                switch (storage.getProtocol()) {
+                    case NFS3:
+                        queryParams.put(Constants.SERVICES, 
Constants.DATA_NFS);
+                        break;
+                    case ISCSI:
+                        queryParams.put(Constants.SERVICES, 
Constants.DATA_ISCSI);
+                        break;
+                    default:
+                        s_logger.error("Unsupported protocol: " + 
storage.getProtocol());
+                        throw new CloudRuntimeException("Unsupported protocol: 
" + storage.getProtocol());
+                }
+            }
+            queryParams.put(Constants.FIELDS, Constants.IP_ADDRESS);
+            queryParams.put(Constants.RETURN_RECORDS, Constants.TRUE);
+            OntapResponse<IpInterface> response =
+                    networkFeignClient.getNetworkIpInterfaces(authHeader, 
queryParams);
+            if (response != null && response.getRecords() != null && 
!response.getRecords().isEmpty()) {
+                IpInterface ipInterface = null;
+                if (storage.getProtocol() == ProtocolType.ISCSI) {
+                    ipInterface = response.getRecords().get(0);
+                } else if (storage.getProtocol() == ProtocolType.NFS3) {
+                    for (IpInterface iface : response.getRecords()) {
+                        if (iface.getIp().getAddress().contains(".")) {
+                            ipInterface = iface;
+                            break;
+                        }
+                    }
+                }
+
+                s_logger.info("Retrieved network interface: " + 
ipInterface.getIp().getAddress());
+                return ipInterface.getIp().getAddress();
+            } else {
+                throw new CloudRuntimeException("No network interfaces found 
for SVM " + storage.getSvmName() +
+                        " for protocol " + storage.getProtocol());
+            }
+        } catch (FeignException.FeignClientException e) {
+            s_logger.error("Exception while retrieving network interfaces: ", 
e);
+            throw new CloudRuntimeException("Failed to retrieve network 
interfaces: " + e.getMessage());
+        }
+    }
+
+    abstract public CloudStackVolume createCloudStackVolume(CloudStackVolume 
cloudstackVolume);
+
+    abstract CloudStackVolume updateCloudStackVolume(CloudStackVolume 
cloudstackVolume);
+
+    abstract public void deleteCloudStackVolume(CloudStackVolume 
cloudstackVolume);
+
+    abstract public void copyCloudStackVolume(CloudStackVolume 
cloudstackVolume);
+
+    abstract public CloudStackVolume getCloudStackVolume(Map<String, String> 
cloudStackVolumeMap);
+
+    abstract public AccessGroup createAccessGroup(AccessGroup accessGroup);
+
+    abstract public void deleteAccessGroup(AccessGroup accessGroup);
+
+    abstract AccessGroup updateAccessGroup(AccessGroup accessGroup);
+
+    abstract public AccessGroup getAccessGroup(Map<String, String> values);
+
+    abstract public Map<String,String> enableLogicalAccess(Map<String,String> 
values);
+
+    abstract public void disableLogicalAccess(Map<String, String> values);
+
+    abstract public Map<String, String> getLogicalAccess(Map<String, String> 
values);

Review Comment:
   Maybe add an interface for these methods?



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java:
##########
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.provider;
+
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.UnifiedNASStrategy;
+import org.apache.cloudstack.storage.service.UnifiedSANStrategy;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+
+public class StorageProviderFactory {
+    private static final Logger s_logger = 
LogManager.getLogger(StorageProviderFactory.class);
+
+    public static StorageStrategy getStrategy(OntapStorage ontapStorage) {
+        ProtocolType protocol = ontapStorage.getProtocol();
+        s_logger.info("Initializing StorageProviderFactory with protocol: " + 
protocol);
+        switch (protocol) {
+            case NFS3:
+                if (!ontapStorage.getIsDisaggregated()) {
+                    UnifiedNASStrategy unifiedNASStrategy = new 
UnifiedNASStrategy(ontapStorage);
+                    ComponentContext.inject(unifiedNASStrategy);
+                    unifiedNASStrategy.setOntapStorage(ontapStorage);
+                    return unifiedNASStrategy;
+                }
+                throw new CloudRuntimeException("Unsupported configuration: 
Disaggregated ONTAP is not supported.");
+            case ISCSI:
+                if (!ontapStorage.getIsDisaggregated()) {
+                    UnifiedSANStrategy unifiedSANStrategy = new 
UnifiedSANStrategy(ontapStorage);
+                    ComponentContext.inject(unifiedSANStrategy);
+                    unifiedSANStrategy.setOntapStorage(ontapStorage);
+                    return unifiedSANStrategy;
+                }
+                throw new CloudRuntimeException("Unsupported configuration: 
Disaggregated ONTAP is not supported.");
+            default:
+                throw new CloudRuntimeException("Unsupported protocol: " + 
protocol);
+        }

Review Comment:
   I think this can be reduced since NFS3 and ISCSI cases are very similar 
except for the instantiation of the strategy - can you consider refactoring to 
a single method similar to this:
   
   (Note: `protocol` is a `ProtocolType` enum, so compare with `==` rather than `equalsIgnoreCase`,
and the disaggregated case still needs its own error path.)
   
   ````
   if (protocol != ProtocolType.NFS3 && protocol != ProtocolType.ISCSI) {
      throw new CloudRuntimeException("Unsupported protocol: " + protocol);
   }
   
   if (ontapStorage.getIsDisaggregated()) {
      throw new CloudRuntimeException("Unsupported configuration: Disaggregated ONTAP is not supported.");
   }
   
   StorageStrategy unifiedStrategy = protocol == ProtocolType.NFS3 ?
         new UnifiedNASStrategy(ontapStorage) : new UnifiedSANStrategy(ontapStorage);
   ComponentContext.inject(unifiedStrategy);
   unifiedStrategy.setOntapStorage(ontapStorage);
   return unifiedStrategy;
   ````



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java:
##########
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+        package org.apache.cloudstack.storage.service;

Review Comment:
   Minor: extra tab at the start of the line?



##########
plugins/storage/volume/ontap/README.md:
##########
@@ -0,0 +1,123 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+
+# Apache CloudStack - NetApp ONTAP Storage Plugin

Review Comment:
   +1 great file - can you also raise a documentation PR to 
https://github.com/apache/cloudstack-documentation including this 
content?



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java:
##########
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.host.HostVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NASFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
+import org.apache.cloudstack.storage.feign.model.ExportRule;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class UnifiedNASStrategy extends NASStrategy {
+
+    private static final Logger s_logger = 
LogManager.getLogger(UnifiedNASStrategy.class);
+    private final FeignClientFactory feignClientFactory;
+    private final NASFeignClient nasFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final JobFeignClient jobFeignClient;
+    @Inject private VolumeDao volumeDao;
+    @Inject private EndPointSelector epSelector;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+
+    public UnifiedNASStrategy(OntapStorage ontapStorage) {
+        super(ontapStorage);
+        String baseURL = Constants.HTTPS + ontapStorage.getManagementLIF();
+        this.feignClientFactory = new FeignClientFactory();
+        this.nasFeignClient = 
feignClientFactory.createClient(NASFeignClient.class, baseURL);
+        this.volumeFeignClient = 
feignClientFactory.createClient(VolumeFeignClient.class,baseURL );
+        this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL );
+    }
+
+    public void setOntapStorage(OntapStorage ontapStorage) {
+        this.storage = ontapStorage;
+    }
+
+    @Override
+    public CloudStackVolume createCloudStackVolume(CloudStackVolume 
cloudstackVolume) {
+        return null;
+    }
+
+    @Override
+    CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) 
{
+        return null;
+    }
+
+    @Override
+    public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {
+    }
+
+    @Override
+    public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {
+
+    }
+
+    @Override
+    public CloudStackVolume getCloudStackVolume(Map<String, String> 
cloudStackVolumeMap) {
+        return null;
+    }

Review Comment:
   What is the use of these methods? Will they be extended in the future?



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java:
##########
@@ -0,0 +1,528 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.lifecycle;
+
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Preconditions;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import 
org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+public class OntapPrimaryDatastoreLifecycle extends 
BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+    @Inject private ClusterDao _clusterDao;
+    @Inject private StorageManager _storageMgr;
+    @Inject private ResourceManager _resourceMgr;
+    @Inject private PrimaryDataStoreHelper _dataStoreHelper;
+    @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao;
+    @Inject private StoragePoolAutomation _storagePoolAutomation;
+    @Inject private PrimaryDataStoreDao storagePoolDao;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+    private static final Logger s_logger = 
LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
+
+    private static final long ONTAP_MIN_VOLUME_SIZE = 1677721600L;
+
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        if (dsInfos == null) {
+            throw new CloudRuntimeException("Datastore info map is null, 
cannot create primary storage");
+        }
+        String url = (String) dsInfos.get("url");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        Long podId = (Long) dsInfos.get("podId");
+        Long clusterId = (Long) dsInfos.get("clusterId");
+        String storagePoolName = (String) dsInfos.get("name");
+        String providerName = (String) dsInfos.get("providerName");
+        Long capacityBytes = (Long) dsInfos.get("capacityBytes");
+        boolean managed = (boolean) dsInfos.get("managed");
+        String tags = (String) dsInfos.get("tags");
+        Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
+
+        s_logger.info("Creating ONTAP primary storage pool with name: " + 
storagePoolName + ", provider: " + providerName +
+                ", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " 
+ clusterId);
+        s_logger.debug("Received capacityBytes from UI: " + capacityBytes);
+
+        @SuppressWarnings("unchecked")
+        Map<String, String> details = (Map<String, String>) 
dsInfos.get("details");
+
+        if (capacityBytes == null || capacityBytes <= 0) {
+            s_logger.warn("capacityBytes not provided or invalid (" + 
capacityBytes + "), using ONTAP minimum size: " + ONTAP_MIN_VOLUME_SIZE);
+            capacityBytes = ONTAP_MIN_VOLUME_SIZE;
+        } else if (capacityBytes < ONTAP_MIN_VOLUME_SIZE) {
+            s_logger.warn("capacityBytes (" + capacityBytes + ") is below 
ONTAP minimum (" + ONTAP_MIN_VOLUME_SIZE + "), adjusting to minimum");
+            capacityBytes = ONTAP_MIN_VOLUME_SIZE;
+        }
+
+        if (podId == null ^ clusterId == null) {
+            throw new CloudRuntimeException("Cluster Id or Pod Id is null, 
cannot create primary storage");
+        }
+
+        if (podId == null && clusterId == null) {
+            if (zoneId != null) {
+                s_logger.info("Both Pod Id and Cluster Id are null, Primary 
storage pool will be associated with a Zone");
+            } else {
+                throw new CloudRuntimeException("Pod Id, Cluster Id and Zone 
Id are all null, cannot create primary storage");
+            }
+        }

Review Comment:
   I think this parameter validation deserves a separate method.



##########
plugins/storage/volume/ontap/pom.xml:
##########
@@ -0,0 +1,169 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>cloud-plugin-storage-volume-ontap</artifactId>
+    <name>Apache CloudStack Plugin - Storage Volume ONTAP Provider</name>
+    <parent>
+        <groupId>org.apache.cloudstack</groupId>
+        <artifactId>cloudstack-plugins</artifactId>
+        <version>4.23.0.0-SNAPSHOT</version>
+        <relativePath>../../../pom.xml</relativePath>
+    </parent>
+    <properties>
+        <spring-cloud.version>2021.0.7</spring-cloud.version>
+        <openfeign.version>11.0</openfeign.version>
+        <httpclient.version>4.5.14</httpclient.version>
+        <swagger-annotations.version>1.6.2</swagger-annotations.version>
+        <maven-compiler-plugin.version>3.8.1</maven-compiler-plugin.version>
+        <maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version>
+        <jackson-databind.version>2.13.4</jackson-databind.version>
+        <assertj.version>3.24.2</assertj.version>
+        <junit-jupiter.version>5.8.1</junit-jupiter.version>
+        <mockito.version>3.12.4</mockito.version>
+        <mockito-junit-jupiter.version>5.2.0</mockito-junit-jupiter.version>

Review Comment:
   +1 reusing parent dependencies would be better



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to