sandeeplocharla commented on code in PR #12563:
URL: https://github.com/apache/cloudstack/pull/12563#discussion_r3039949774


##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Policy.java:
##########
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Policy {
+    private int minThroughputIops;

Review Comment:
   Not really necessary, as the 'Policy' object's prime purpose is to be used in 
another model object. We still created it separately, as we might need it in the 
near future.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java:
##########
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.driver;
+
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.host.Host;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.utils.Pair;
+import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
+
+    private static final Logger s_logger = 
LogManager.getLogger(OntapPrimaryDatastoreDriver.class);

Review Comment:
   Sure, thank you!



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java:
##########
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import feign.RequestInterceptor;
+import feign.Retryer;
+import feign.Client;
+import feign.httpclient.ApacheHttpClient;
+import feign.codec.Decoder;
+import feign.codec.Encoder;
+import feign.Response;
+import feign.codec.DecodeException;
+import feign.codec.EncodeException;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import org.apache.http.conn.ConnectionKeepAliveStrategy;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.TrustAllStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.ssl.SSLContexts;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.TimeUnit;
+
+public class FeignConfiguration {
+    private static final Logger logger = 
LogManager.getLogger(FeignConfiguration.class);
+
+    private final int retryMaxAttempt = 3;
+    private final int retryMaxInterval = 5;
+    private final String ontapFeignMaxConnection = "80";
+    private final String ontapFeignMaxConnectionPerRoute = "20";
+    private final ObjectMapper objectMapper;
+
+    public FeignConfiguration() {
+        this.objectMapper = new ObjectMapper();
+        
this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, 
false);
+    }
+
+    public Client createClient() {
+        int maxConn;
+        int maxConnPerRoute;
+        try {
+            maxConn = Integer.parseInt(this.ontapFeignMaxConnection);
+        } catch (Exception e) {
+            logger.error("ontapFeignClient: parse max connection failed, using 
default");
+            maxConn = 20;
+        }
+        try {
+            maxConnPerRoute = 
Integer.parseInt(this.ontapFeignMaxConnectionPerRoute);
+        } catch (Exception e) {
+            logger.error("ontapFeignClient: parse max connection per route 
failed, using default");
+            maxConnPerRoute = 2;

Review Comment:
   Same as previous



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java:
##########
@@ -0,0 +1,535 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.lifecycle;
+
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Preconditions;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import 
org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.OntapStorageConstants;
+import org.apache.cloudstack.storage.utils.OntapStorageUtils;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+public class OntapPrimaryDatastoreLifecycle extends 
BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+    @Inject private ClusterDao _clusterDao;
+    @Inject private StorageManager _storageMgr;
+    @Inject private ResourceManager _resourceMgr;
+    @Inject private PrimaryDataStoreHelper _dataStoreHelper;
+    @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao;
+    @Inject private StoragePoolAutomation _storagePoolAutomation;
+    @Inject private PrimaryDataStoreDao storagePoolDao;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+    private static final Logger s_logger = 
LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
+
+    private static final long ONTAP_MIN_VOLUME_SIZE = 1677721600L;
+
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        if (dsInfos == null) {
+            throw new CloudRuntimeException("Datastore info map is null, 
cannot create primary storage");
+        }
+        String url = (String) dsInfos.get("url");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        Long podId = (Long) dsInfos.get("podId");
+        Long clusterId = (Long) dsInfos.get("clusterId");
+        String storagePoolName = (String) dsInfos.get("name");
+        String providerName = (String) dsInfos.get("providerName");
+        Long capacityBytes = (Long) dsInfos.get("capacityBytes");
+        boolean managed = (boolean) dsInfos.get("managed");

Review Comment:
   This code has been changed now. We have modified the code to be manageable. 
But, given that the PR was from Feb, there would be some improvements coming in 
with upcoming PRs.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java:
##########
@@ -0,0 +1,535 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.lifecycle;
+
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Preconditions;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import 
org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.OntapStorageConstants;
+import org.apache.cloudstack.storage.utils.OntapStorageUtils;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+public class OntapPrimaryDatastoreLifecycle extends 
BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+    @Inject private ClusterDao _clusterDao;
+    @Inject private StorageManager _storageMgr;
+    @Inject private ResourceManager _resourceMgr;
+    @Inject private PrimaryDataStoreHelper _dataStoreHelper;
+    @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao;
+    @Inject private StoragePoolAutomation _storagePoolAutomation;
+    @Inject private PrimaryDataStoreDao storagePoolDao;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+    private static final Logger s_logger = 
LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
+
+    private static final long ONTAP_MIN_VOLUME_SIZE = 1677721600L;
+
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        if (dsInfos == null) {
+            throw new CloudRuntimeException("Datastore info map is null, 
cannot create primary storage");
+        }
+        String url = (String) dsInfos.get("url");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        Long podId = (Long) dsInfos.get("podId");
+        Long clusterId = (Long) dsInfos.get("clusterId");
+        String storagePoolName = (String) dsInfos.get("name");
+        String providerName = (String) dsInfos.get("providerName");
+        Long capacityBytes = (Long) dsInfos.get("capacityBytes");
+        boolean managed = (boolean) dsInfos.get("managed");
+        String tags = (String) dsInfos.get("tags");
+        Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
+
+        s_logger.info("Creating ONTAP primary storage pool with name: " + 
storagePoolName + ", provider: " + providerName +
+                ", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " 
+ clusterId);
+        s_logger.debug("Received capacityBytes from UI: " + capacityBytes);
+
+        @SuppressWarnings("unchecked")
+        Map<String, String> details = (Map<String, String>) 
dsInfos.get("details");
+
+        capacityBytes = validateInitializeInputs(capacityBytes, podId, 
clusterId, zoneId, storagePoolName, providerName, managed, url, details);
+
+        PrimaryDataStoreParameters parameters = new 
PrimaryDataStoreParameters();
+        if (clusterId != null) {
+            ClusterVO clusterVO = _clusterDao.findById(clusterId);
+            Preconditions.checkNotNull(clusterVO, "Unable to locate the 
specified cluster");
+            if (clusterVO.getHypervisorType() != 
Hypervisor.HypervisorType.KVM) {
+                throw new CloudRuntimeException("ONTAP primary storage is 
supported only for KVM hypervisor");
+            }
+            parameters.setHypervisorType(clusterVO.getHypervisorType());
+        }
+
+        details.put(OntapStorageConstants.SIZE, capacityBytes.toString());
+        details.putIfAbsent(OntapStorageConstants.IS_DISAGGREGATED, "false");
+
+        ProtocolType protocol = 
ProtocolType.valueOf(details.get(OntapStorageConstants.PROTOCOL));
+
+        long volumeSize = 
Long.parseLong(details.get(OntapStorageConstants.SIZE));
+        OntapStorage ontapStorage = new OntapStorage(
+                details.get(OntapStorageConstants.USERNAME),
+                details.get(OntapStorageConstants.PASSWORD),
+                details.get(OntapStorageConstants.MANAGEMENT_LIF),
+                details.get(OntapStorageConstants.SVM_NAME),
+                volumeSize,
+                protocol,
+                
Boolean.parseBoolean(details.get(OntapStorageConstants.IS_DISAGGREGATED).toLowerCase()));
+
+        StorageStrategy storageStrategy = 
StorageProviderFactory.getStrategy(ontapStorage);
+        boolean isValid = storageStrategy.connect();
+        if (isValid) {
+            String dataLIF = storageStrategy.getNetworkInterface();
+            if (dataLIF == null || dataLIF.isEmpty()) {
+                throw new CloudRuntimeException("Failed to retrieve Data LIF 
from ONTAP, cannot create primary storage");
+            }
+            s_logger.info("Using Data LIF for storage access: " + dataLIF);
+            details.put(OntapStorageConstants.DATA_LIF, dataLIF);
+            s_logger.info("Creating ONTAP volume '" + storagePoolName + "' 
with size: " + volumeSize + " bytes (" +
+                    (volumeSize / (1024 * 1024 * 1024)) + " GB)");
+            try {
+                Volume volume = 
storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+                if (volume == null) {
+                    s_logger.error("createStorageVolume returned null for 
volume: " + storagePoolName);
+                    throw new CloudRuntimeException("Failed to create ONTAP 
volume: " + storagePoolName);
+                }
+                s_logger.info("Volume object retrieved successfully. UUID: " + 
volume.getUuid() + ", Name: " + volume.getName());
+                details.putIfAbsent(OntapStorageConstants.VOLUME_UUID, 
volume.getUuid());
+                details.putIfAbsent(OntapStorageConstants.VOLUME_NAME, 
volume.getName());
+            } catch (Exception e) {
+                s_logger.error("Exception occurred while creating ONTAP 
volume: " + storagePoolName, e);
+                throw new CloudRuntimeException("Failed to create ONTAP 
volume: " + storagePoolName + ". Error: " + e.getMessage(), e);
+            }
+        } else {
+            throw new CloudRuntimeException("ONTAP details validation failed, 
cannot create primary storage");
+        }
+
+        String path;
+        int port;
+        switch (protocol) {
+            case NFS3:
+                parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
+                path = OntapStorageConstants.SLASH + storagePoolName;
+                port = OntapStorageConstants.NFS3_PORT;
+                s_logger.info("Setting NFS path for storage pool: " + path + 
", port: " + port);
+                break;
+            case ISCSI:
+                parameters.setType(Storage.StoragePoolType.Iscsi);
+                path = storageStrategy.getStoragePath();
+                port = OntapStorageConstants.ISCSI_PORT;
+                s_logger.info("Setting iSCSI path for storage pool: " + path + 
", port: " + port);
+                break;
+            default:
+                throw new CloudRuntimeException("Unsupported protocol: " + 
protocol + ", cannot create primary storage");
+        }
+
+        parameters.setHost(details.get(OntapStorageConstants.DATA_LIF));
+        parameters.setPort(port);
+        parameters.setPath(path);
+        parameters.setTags(tags);
+        parameters.setIsTagARule(isTagARule);
+        parameters.setDetails(details);
+        parameters.setUuid(UUID.randomUUID().toString());
+        parameters.setZoneId(zoneId);
+        parameters.setPodId(podId);
+        parameters.setClusterId(clusterId);
+        parameters.setName(storagePoolName);
+        parameters.setProviderName(providerName);
+        parameters.setManaged(managed);
+        parameters.setCapacityBytes(capacityBytes);
+        parameters.setUsedBytes(0);
+
+        return _dataStoreHelper.createPrimaryDataStore(parameters);
+    }
+
+    private long validateInitializeInputs(Long capacityBytes, Long podId, Long 
clusterId, Long zoneId,
+            String storagePoolName, String providerName, boolean managed, 
String url, Map<String, String> details) {
+
+        // Capacity validation
+        if (capacityBytes == null || capacityBytes <= 0) {
+            s_logger.warn("capacityBytes not provided or invalid (" + 
capacityBytes + "), using ONTAP minimum size: " + ONTAP_MIN_VOLUME_SIZE);
+            capacityBytes = ONTAP_MIN_VOLUME_SIZE;
+        } else if (capacityBytes < ONTAP_MIN_VOLUME_SIZE) {
+            s_logger.warn("capacityBytes (" + capacityBytes + ") is below 
ONTAP minimum (" + ONTAP_MIN_VOLUME_SIZE + "), adjusting to minimum");
+            capacityBytes = ONTAP_MIN_VOLUME_SIZE;
+        }

Review Comment:
   sure, will check this out



##########
plugins/storage/volume/ontap/README.md:
##########
@@ -0,0 +1,123 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+
+# Apache CloudStack - NetApp ONTAP Storage Plugin
+
+## Overview
+
+The NetApp ONTAP Storage Plugin provides integration between Apache CloudStack 
and NetApp ONTAP storage systems. This plugin enables CloudStack to provision 
and manage primary storage on ONTAP clusters, supporting both NAS (NFS) and SAN 
(iSCSI) protocols.
+
+## Features
+
+- **Primary Storage Support**: Provision and manage primary storage pools on 
NetApp ONTAP
+- **Multiple Protocols**: Support for NFS 3.0 and iSCSI protocols
+- **Unified Storage**: Integration with traditional ONTAP unified storage 
architecture
+- **KVM Hypervisor Support**: Supports KVM hypervisor environments
+- **Managed Storage**: Operates as managed storage with full lifecycle 
management
+- **Flexible Scoping**: Support for Zone-wide and Cluster-scoped storage pools
+
+## Architecture
+
+### Component Structure
+
+| Package | Description                                           |
+|---------|-------------------------------------------------------|
+| `driver` | Primary datastore driver implementation               |
+| `feign` | REST API clients and data models for ONTAP operations |
+| `lifecycle` | Storage pool lifecycle management                     |
+| `listener` | Host connection event handlers                        |
+| `provider` | Main provider and strategy factory                    |
+| `service` | ONTAP Storage strategy implementations (NAS/SAN)      |
+| `utils` | Constants and helper utilities                        |
+
+## Requirements
+
+### ONTAP Requirements
+
+- NetApp ONTAP 9.15.1 or higher
+- Storage Virtual Machine (SVM) configured with appropriate protocols enabled
+- Management LIF accessible from CloudStack management server
+- Data LIF(s) accessible from hypervisor hosts and are of IPv4 type
+- Aggregates assigned to the SVM with sufficient capacity
+
+### CloudStack Requirements
+
+- Apache CloudStack current version or higher
+- KVM hypervisor hosts
+- For iSCSI: Hosts must have iSCSI initiator configured with valid IQN
+- For NFS: Hosts must have NFS client packages installed
+
+### Minimum Volume Size
+
+ONTAP requires a minimum volume size of **1.56 GB** (1,677,721,600 bytes). The 
plugin will automatically adjust requested sizes below this threshold.
+
+## Configuration
+
+### Storage Pool Creation Parameters
+
+When creating an ONTAP primary storage pool, provide the following details in 
the URL field (semicolon-separated key=value pairs):
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `username` | Yes | ONTAP cluster admin username |
+| `password` | Yes | ONTAP cluster admin password |
+| `svmName` | Yes | Storage Virtual Machine name |
+| `protocol` | Yes | Storage protocol (`NFS3` or `ISCSI`) |
+| `managementLIF` | Yes | ONTAP cluster management LIF IP address |

Review Comment:
   For this PR, the user should pass these manually in the format mentioned in 
the doc. But we have made UI changes, so with the next PR it should be handled 
properly.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java:
##########
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.OntapStorageConstants;
+import org.apache.cloudstack.storage.utils.OntapStorageUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public abstract class StorageStrategy {
+    private final FeignClientFactory feignClientFactory;
+    private final AggregateFeignClient aggregateFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final SvmFeignClient svmFeignClient;
+    private final JobFeignClient jobFeignClient;
+    private final NetworkFeignClient networkFeignClient;
+    private final SANFeignClient sanFeignClient;
+
+    protected OntapStorage storage;
+
+    private List<Aggregate> aggregates;
+
+    private static final Logger s_logger = 
LogManager.getLogger(StorageStrategy.class);
+
+    public StorageStrategy(OntapStorage ontapStorage) {
+        storage = ontapStorage;
+        String baseURL = OntapStorageConstants.HTTPS + 
storage.getManagementLIF();
+        s_logger.info("Initializing StorageStrategy with base URL: " + 
baseURL);
+        this.feignClientFactory = new FeignClientFactory();
+        this.aggregateFeignClient = 
feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
+        this.volumeFeignClient = 
feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
+        this.svmFeignClient = 
feignClientFactory.createClient(SvmFeignClient.class, baseURL);
+        this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL);
+        this.networkFeignClient = 
feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
+        this.sanFeignClient = 
feignClientFactory.createClient(SANFeignClient.class, baseURL);
+    }
+
+    public boolean connect() {
+        s_logger.info("Attempting to connect to ONTAP cluster at " + 
storage.getManagementLIF() + " and validate SVM " +
+                storage.getSvmName() + ", protocol " + storage.getProtocol());
+        String authHeader = 
OntapStorageUtils.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+        String svmName = storage.getSvmName();
+        try {
+            Svm svm = new Svm();
+            s_logger.info("Fetching the SVM details...");
+            Map<String, Object> queryParams = 
Map.of(OntapStorageConstants.NAME, svmName, OntapStorageConstants.FIELDS, 
OntapStorageConstants.AGGREGATES +
+                    OntapStorageConstants.COMMA + OntapStorageConstants.STATE);
+            OntapResponse<Svm> svms = 
svmFeignClient.getSvmResponse(queryParams, authHeader);
+            if (svms != null && svms.getRecords() != null && 
!svms.getRecords().isEmpty()) {
+                svm = svms.getRecords().get(0);
+            } else {
+                s_logger.error("No SVM found on the ONTAP cluster by the name" 
+ svmName + ".");
+                return false;
+            }
+
+            s_logger.info("Validating SVM state and protocol settings...");
+            if (!Objects.equals(svm.getState(), 
OntapStorageConstants.RUNNING)) {
+                s_logger.error("SVM " + svmName + " is not in running state.");
+                return false;
+            }
+            if (Objects.equals(storage.getProtocol(), 
OntapStorageConstants.NFS) && !svm.getNfsEnabled()) {
+                s_logger.error("NFS protocol is not enabled on SVM " + 
svmName);
+                return false;
+            } else if (Objects.equals(storage.getProtocol(), 
OntapStorageConstants.ISCSI) && !svm.getIscsiEnabled()) {
+                s_logger.error("iSCSI protocol is not enabled on SVM " + 
svmName);
+                return false;
+            }
+            List<Aggregate> aggrs = svm.getAggregates();
+            if (aggrs == null || aggrs.isEmpty()) {
+                s_logger.error("No aggregates are assigned to SVM " + svmName);
+                return false;
+            }
+            for (Aggregate aggr : aggrs) {
+                s_logger.debug("Found aggregate: " + aggr.getName() + " with 
UUID: " + aggr.getUuid());
+                Aggregate aggrResp = 
aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());

Review Comment:
   This should not happen, but will add the null check to be on the safe side. 
Great catch! Thank you!



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java:
##########
@@ -0,0 +1,535 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.lifecycle;
+
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Preconditions;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import 
org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.OntapStorageConstants;
+import org.apache.cloudstack.storage.utils.OntapStorageUtils;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+public class OntapPrimaryDatastoreLifecycle extends 
BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+    @Inject private ClusterDao _clusterDao;
+    @Inject private StorageManager _storageMgr;
+    @Inject private ResourceManager _resourceMgr;
+    @Inject private PrimaryDataStoreHelper _dataStoreHelper;
+    @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao;
+    @Inject private StoragePoolAutomation _storagePoolAutomation;
+    @Inject private PrimaryDataStoreDao storagePoolDao;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+    private static final Logger s_logger = 
LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
+
+    private static final long ONTAP_MIN_VOLUME_SIZE = 1677721600L;
+
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        if (dsInfos == null) {
+            throw new CloudRuntimeException("Datastore info map is null, 
cannot create primary storage");
+        }
+        String url = (String) dsInfos.get("url");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        Long podId = (Long) dsInfos.get("podId");
+        Long clusterId = (Long) dsInfos.get("clusterId");
+        String storagePoolName = (String) dsInfos.get("name");
+        String providerName = (String) dsInfos.get("providerName");
+        Long capacityBytes = (Long) dsInfos.get("capacityBytes");
+        boolean managed = (boolean) dsInfos.get("managed");
+        String tags = (String) dsInfos.get("tags");
+        Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");

Review Comment:
   Sure, will check this out. Thank you!!



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Igroup.java:
##########
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
+
+import java.util.List;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Igroup {
+    @JsonProperty("delete_on_unmap")
+    private Boolean deleteOnUnmap = null;
+    @JsonProperty("initiators")
+    private List<Initiator> initiators = null;
+    @JsonProperty("lun_maps")
+    private List<LunMap> lunMaps = null;
+    @JsonProperty("os_type")
+    private OsTypeEnum osType = null;
+
+    @JsonProperty("parent_igroups")
+    private List<Igroup> parentIgroups = null;
+
+    @JsonProperty("igroups")
+    private List<Igroup> igroups = null;
+
+    @JsonProperty("name")
+    private String name = null;
+
+    @JsonProperty("protocol")
+    private ProtocolEnum protocol = null;
+    @JsonProperty("svm")
+    private Svm svm = null;
+    @JsonProperty("uuid")
+    private String uuid = null;
+
+    public enum OsTypeEnum {
+        hyper_v("hyper_v"),

Review Comment:
   Will take this up, thanks!



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java:
##########
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import feign.RequestInterceptor;
+import feign.Retryer;
+import feign.Client;
+import feign.httpclient.ApacheHttpClient;
+import feign.codec.Decoder;
+import feign.codec.Encoder;
+import feign.Response;
+import feign.codec.DecodeException;
+import feign.codec.EncodeException;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import org.apache.http.conn.ConnectionKeepAliveStrategy;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.TrustAllStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.ssl.SSLContexts;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.TimeUnit;
+
+public class FeignConfiguration {
+    private static final Logger logger = 
LogManager.getLogger(FeignConfiguration.class);
+
+    private final int retryMaxAttempt = 3;
+    private final int retryMaxInterval = 5;
+    private final String ontapFeignMaxConnection = "80";
+    private final String ontapFeignMaxConnectionPerRoute = "20";
+    private final ObjectMapper objectMapper;
+
+    public FeignConfiguration() {
+        this.objectMapper = new ObjectMapper();
+        
this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, 
false);
+    }
+
+    public Client createClient() {
+        int maxConn;
+        int maxConnPerRoute;
+        try {
+            maxConn = Integer.parseInt(this.ontapFeignMaxConnection);
+        } catch (Exception e) {
+            logger.error("ontapFeignClient: parse max connection failed, using 
default");
+            maxConn = 20;

Review Comment:
   We had initially planned to make these values configurable, hence the 
try-catch block. The default was 20. We have received a similar comment before 
and plan to evaluate the need for making it configurable once users start 
testing the plugin. 



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java:
##########
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.listener;
+
+import javax.inject.Inject;
+
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.alert.AlertManager;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.Host;
+import com.cloud.storage.StoragePool;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import com.cloud.host.dao.HostDao;
+
+public class OntapHostListener implements HypervisorHostListener {
+    protected Logger logger = LogManager.getLogger(getClass());
+
+    @Inject
+    private AgentManager _agentMgr;
+    @Inject
+    private AlertManager _alertMgr;
+    @Inject
+    private PrimaryDataStoreDao _storagePoolDao;
+    @Inject
+    private HostDao _hostDao;
+    @Inject private StoragePoolHostDao storagePoolHostDao;
+
+
+    @Override
+    public boolean hostConnect(long hostId, long poolId)  {
+        logger.info("Connect to host " + hostId + " from pool " + poolId);
+        Host host = _hostDao.findById(hostId);
+        if (host == null) {
+            logger.error("host was not found with id : {}", hostId);
+            return false;
+        }
+
+        StoragePool pool = _storagePoolDao.findById(poolId);
+        if (pool == null) {
+            logger.error("Failed to connect host - storage pool not found with 
id: {}", poolId);
+            return false;
+        }
+        logger.info("Connecting host {} to ONTAP storage pool {}", 
host.getName(), pool.getName());
+        try {
+            ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, 
pool);
+
+            Answer answer = _agentMgr.easySend(hostId, cmd);
+
+            if (answer == null) {
+                throw new CloudRuntimeException(String.format("Unable to get 
an answer to the modify storage pool command (%s)", pool));
+            }
+
+            if (!answer.getResult()) {
+                String msg = String.format("Unable to attach storage pool %s 
to host %d", pool, hostId);
+
+                _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, 
pool.getDataCenterId(), pool.getPodId(), msg, msg);
+
+                throw new CloudRuntimeException(String.format(
+                        "Unable to establish a connection from agent to 
storage pool %s due to %s", pool, answer.getDetails()));
+            }
+
+            if (!(answer instanceof ModifyStoragePoolAnswer)) {
+                logger.error("Received unexpected answer type {} for storage 
pool {}", answer.getClass().getName(), pool.getName());
+                throw new CloudRuntimeException("Failed to connect to storage 
pool. Please check agent logs for details.");
+            }
+
+            ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) 
answer;
+            StoragePoolInfo poolInfo = mspAnswer.getPoolInfo();
+            if (poolInfo == null) {
+                throw new CloudRuntimeException("ModifyStoragePoolAnswer 
returned null poolInfo");
+            }
+
+            String localPath = poolInfo.getLocalPath();
+            logger.info("Storage pool {} successfully mounted at: {}", 
pool.getName(), localPath);
+
+            StoragePoolHostVO storagePoolHost = 
storagePoolHostDao.findByPoolHost(poolId, hostId);
+
+            if (storagePoolHost == null) {
+                storagePoolHost = new StoragePoolHostVO(poolId, hostId, 
localPath);
+                storagePoolHostDao.persist(storagePoolHost);
+                logger.info("Created storage_pool_host_ref entry for pool {} 
and host {}", pool.getName(), host.getName());
+            } else {
+                storagePoolHost.setLocalPath(localPath);
+                storagePoolHostDao.update(storagePoolHost.getId(), 
storagePoolHost);
+                logger.info("Updated storage_pool_host_ref entry with 
local_path: {}", localPath);
+            }
+
+            StoragePoolVO poolVO = _storagePoolDao.findById(poolId);
+            if (poolVO != null && poolInfo.getCapacityBytes() > 0) {
+                poolVO.setCapacityBytes(poolInfo.getCapacityBytes());
+                poolVO.setUsedBytes(poolInfo.getCapacityBytes() - 
poolInfo.getAvailableBytes());
+                _storagePoolDao.update(poolVO.getId(), poolVO);
+                logger.info("Updated storage pool capacity: {} GB, used: {} 
GB", poolInfo.getCapacityBytes() / (1024 * 1024 * 1024), 
(poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes()) / (1024 * 1024 * 
1024));
+            }
+
+        } catch (Exception e) {
+            logger.error("Exception while connecting host {} to storage pool 
{}", host.getName(), pool.getName(), e);
+            return false;
+        }
+        return true;
+    }
+
+    @Override
+    public boolean hostDisconnected(Host host, StoragePool pool) {
+        logger.info("Disconnect from host " + host.getId() + " from pool " + 
pool.getName());
+
+        Host hostToremove = _hostDao.findById(host.getId());
+        if (hostToremove == null) {
+            logger.error("Failed to add host by HostListener as host was not 
found with id : {}", host.getId());
+            return false;
+        }
+        logger.info("Disconnecting host {} from ONTAP storage pool {}", 
host.getName(), pool.getName());
+
+        try {
+            DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(pool);
+            long hostId = host.getId();
+            Answer answer = _agentMgr.easySend(hostId, cmd);
+
+            if (answer != null && answer.getResult()) {
+                logger.info("Successfully disconnected host {} from ONTAP 
storage pool {}", host.getName(), pool.getName());
+                return true;
+            } else {
+                String errMsg = (answer != null) ? answer.getDetails() : 
"Unknown error";
+                logger.warn("Failed to disconnect host {} from storage pool 
{}. Error: {}", host.getName(), pool.getName(), errMsg);
+                return false;
+            }
+        } catch (Exception e) {
+            logger.error("Exception while disconnecting host {} from storage 
pool {}", host.getName(), pool.getName(), e);
+            return false;
+        }
+    }
+
+    @Override
+    public boolean hostDisconnected(long hostId, long poolId) {
+        return false;

Review Comment:
   Yeah makes sense



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageUtils.java:
##########
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.utils;
+
import com.cloud.storage.ScopeType;
import com.cloud.utils.StringUtils;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.util.Base64Utils;

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.Map;
+
+public class OntapStorageUtils {
+
+    private static final Logger s_logger = 
LogManager.getLogger(OntapStorageUtils.class);
+
+    private static final String BASIC = "Basic";
+    private static final String AUTH_HEADER_COLON = ":";
+
+    public static String generateAuthHeader (String username, String password) 
{
+        byte[] encodedBytes = Base64Utils.encode((username + AUTH_HEADER_COLON 
+ password).getBytes(StandardCharsets.UTF_8));
+        return BASIC + StringUtils.SPACE + new String(encodedBytes);
+    }
+
+    public static StorageStrategy getStrategyByStoragePoolDetails(Map<String, 
String> details) {
+        if (details == null || details.isEmpty()) {
+            s_logger.error("getStrategyByStoragePoolDetails: Storage pool 
details are null or empty");
+            throw new CloudRuntimeException("getStrategyByStoragePoolDetails: 
Storage pool details are null or empty");

Review Comment:
   Yeah, these have been corrected now. But we will push these with the next PR, 
as there are some changes around such logs.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java:
##########
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.OntapStorageConstants;
+import org.apache.cloudstack.storage.utils.OntapStorageUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public abstract class StorageStrategy {
+    private final FeignClientFactory feignClientFactory;
+    private final AggregateFeignClient aggregateFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final SvmFeignClient svmFeignClient;
+    private final JobFeignClient jobFeignClient;
+    private final NetworkFeignClient networkFeignClient;
+    private final SANFeignClient sanFeignClient;
+
+    protected OntapStorage storage;
+
+    private List<Aggregate> aggregates;
+
+    private static final Logger s_logger = 
LogManager.getLogger(StorageStrategy.class);
+
+    public StorageStrategy(OntapStorage ontapStorage) {
+        storage = ontapStorage;
+        String baseURL = OntapStorageConstants.HTTPS + 
storage.getManagementLIF();
+        s_logger.info("Initializing StorageStrategy with base URL: " + 
baseURL);
+        this.feignClientFactory = new FeignClientFactory();
+        this.aggregateFeignClient = 
feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
+        this.volumeFeignClient = 
feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
+        this.svmFeignClient = 
feignClientFactory.createClient(SvmFeignClient.class, baseURL);
+        this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL);
+        this.networkFeignClient = 
feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
+        this.sanFeignClient = 
feignClientFactory.createClient(SANFeignClient.class, baseURL);
+    }
+
+    public boolean connect() {
+        s_logger.info("Attempting to connect to ONTAP cluster at " + 
storage.getManagementLIF() + " and validate SVM " +
+                storage.getSvmName() + ", protocol " + storage.getProtocol());
+        String authHeader = 
OntapStorageUtils.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+        String svmName = storage.getSvmName();
+        try {
+            Svm svm = new Svm();
+            s_logger.info("Fetching the SVM details...");
+            Map<String, Object> queryParams = 
Map.of(OntapStorageConstants.NAME, svmName, OntapStorageConstants.FIELDS, 
OntapStorageConstants.AGGREGATES +
+                    OntapStorageConstants.COMMA + OntapStorageConstants.STATE);
+            OntapResponse<Svm> svms = 
svmFeignClient.getSvmResponse(queryParams, authHeader);
+            if (svms != null && svms.getRecords() != null && 
!svms.getRecords().isEmpty()) {
+                svm = svms.getRecords().get(0);
+            } else {
+                s_logger.error("No SVM found on the ONTAP cluster by the name" 
+ svmName + ".");
+                return false;
+            }
+
+            s_logger.info("Validating SVM state and protocol settings...");
+            if (!Objects.equals(svm.getState(), 
OntapStorageConstants.RUNNING)) {
+                s_logger.error("SVM " + svmName + " is not in running state.");
+                return false;
+            }
+            if (Objects.equals(storage.getProtocol(), 
OntapStorageConstants.NFS) && !svm.getNfsEnabled()) {
+                s_logger.error("NFS protocol is not enabled on SVM " + 
svmName);
+                return false;
+            } else if (Objects.equals(storage.getProtocol(), 
OntapStorageConstants.ISCSI) && !svm.getIscsiEnabled()) {
+                s_logger.error("iSCSI protocol is not enabled on SVM " + 
svmName);
+                return false;
+            }
+            List<Aggregate> aggrs = svm.getAggregates();
+            if (aggrs == null || aggrs.isEmpty()) {
+                s_logger.error("No aggregates are assigned to SVM " + svmName);
+                return false;
+            }
+            for (Aggregate aggr : aggrs) {
+                s_logger.debug("Found aggregate: " + aggr.getName() + " with 
UUID: " + aggr.getUuid());
+                Aggregate aggrResp = 
aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());
+                if (!Objects.equals(aggrResp.getState(), 
Aggregate.StateEnum.ONLINE)) {
+                    s_logger.warn("Aggregate " + aggr.getName() + " is not in 
online state. Skipping this aggregate.");
+                    continue;
+                } else if (aggrResp.getSpace() == null || 
aggrResp.getAvailableBlockStorageSpace() == null ||
+                        aggrResp.getAvailableBlockStorageSpace() <= 
storage.getSize().doubleValue()) {
+                    s_logger.warn("Aggregate " + aggr.getName() + " does not 
have sufficient available space. Skipping this aggregate.");
+                    continue;
+                }
+                s_logger.info("Selected aggregate: " + aggr.getName() + " for 
volume operations.");
+                this.aggregates = List.of(aggr);
+                break;
+            }
+            if (this.aggregates == null || this.aggregates.isEmpty()) {
+                s_logger.error("No suitable aggregates found on SVM " + 
svmName + " for volume creation.");
+                return false;
+            }
+
+            s_logger.info("Successfully connected to ONTAP cluster and 
validated ONTAP details provided");
+        } catch (Exception e) {
+            s_logger.error("Failed to connect to ONTAP cluster: " + 
e.getMessage(), e);
+            return false;
+        }
+        return true;
+    }
+
+    public Volume createStorageVolume(String volumeName, Long size) {
+        s_logger.info("Creating volume: " + volumeName + " of size: " + size + 
" bytes");
+
+        String svmName = storage.getSvmName();
+        if (aggregates == null || aggregates.isEmpty()) {
+            s_logger.error("No aggregates available to create volume on SVM " 
+ svmName);
+            throw new CloudRuntimeException("No aggregates available to create 
volume on SVM " + svmName);
+        }
+        if (size == null || size <= 0) {
+            throw new CloudRuntimeException("Invalid volume size provided: " + 
size);
+        }
+
+        String authHeader = 
OntapStorageUtils.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+
+        Volume volumeRequest = new Volume();
+        Svm svm = new Svm();
+        svm.setName(svmName);
+        Nas nas = new Nas();
+        nas.setPath(OntapStorageConstants.SLASH + volumeName);
+
+        volumeRequest.setName(volumeName);
+        volumeRequest.setSvm(svm);
+
+        long maxAvailableAggregateSpaceBytes = -1L;
+        Aggregate aggrChosen = null;
+        for (Aggregate aggr : aggregates) {
+            s_logger.debug("Found aggregate: " + aggr.getName() + " with UUID: 
" + aggr.getUuid());
+            Aggregate aggrResp = 
aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());
+
+            if (aggrResp == null) {
+                s_logger.warn("Aggregate details response is null for 
aggregate " + aggr.getName() + ". Skipping.");
+                continue;
+            }
+
+            if (!Objects.equals(aggrResp.getState(), 
Aggregate.StateEnum.ONLINE)) {
+                s_logger.warn("Aggregate " + aggr.getName() + " is not in 
online state. Skipping this aggregate.");
+                continue;
+            }
+
+            if (aggrResp.getSpace() == null || 
aggrResp.getAvailableBlockStorageSpace() == null) {
+                s_logger.warn("Aggregate " + aggr.getName() + " does not have 
space information. Skipping this aggregate.");
+                continue;
+            }
+
+            final long availableBytes = 
aggrResp.getAvailableBlockStorageSpace().longValue();
+            s_logger.debug("Aggregate " + aggr.getName() + " available bytes=" 
+ availableBytes + ", requested=" + size);
+
+            if (availableBytes <= size) {
+                s_logger.warn("Aggregate " + aggr.getName() + " does not have 
sufficient available space. Required=" +
+                        size + " bytes, available=" + availableBytes + " 
bytes. Skipping this aggregate.");
+                continue;
+            }

Review Comment:
   Sure, will check this out



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java:
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.host.HostVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NASFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
+import org.apache.cloudstack.storage.feign.model.ExportRule;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.utils.OntapStorageConstants;
+import org.apache.cloudstack.storage.utils.OntapStorageUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class UnifiedNASStrategy extends NASStrategy {
+
+    private static final Logger s_logger = 
LogManager.getLogger(UnifiedNASStrategy.class);
+    private final FeignClientFactory feignClientFactory;
+    private final NASFeignClient nasFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final JobFeignClient jobFeignClient;
+    @Inject private VolumeDao volumeDao;
+    @Inject private EndPointSelector epSelector;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+
+    public UnifiedNASStrategy(OntapStorage ontapStorage) {
+        super(ontapStorage);
+        String baseURL = OntapStorageConstants.HTTPS + 
ontapStorage.getManagementLIF();
+        this.feignClientFactory = new FeignClientFactory();
+        this.nasFeignClient = 
feignClientFactory.createClient(NASFeignClient.class, baseURL);
+        this.volumeFeignClient = 
feignClientFactory.createClient(VolumeFeignClient.class,baseURL );
+        this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL );
+    }
+
+    public void setOntapStorage(OntapStorage ontapStorage) {
+        this.storage = ontapStorage;
+    }
+
    // TODO(review): stub — volume creation is not implemented for the unified NAS
    // strategy yet; callers currently receive null and must tolerate it.
    @Override
    public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
        return null;
    }
+
    // TODO(review): stub — volume update is not implemented yet; returns null.
    @Override
    CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) {
        return null;
    }
+
    // TODO(review): stub — volume deletion is currently a no-op.
    @Override
    public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {
    }
+
    // TODO(review): stub — volume copy is currently a no-op.
    @Override
    public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {

    }
+
    // TODO(review): stub — volume lookup is not implemented yet; returns null.
    @Override
    public CloudStackVolume getCloudStackVolume(Map<String, String> cloudStackVolumeMap) {
        return null;
    }
+
+    @Override
+    public AccessGroup createAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("createAccessGroup: Create access group {}: " , 
accessGroup);
+        Map<String, String> details = 
accessGroup.getPrimaryDataStoreInfo().getDetails();
+        String svmName = details.get(OntapStorageConstants.SVM_NAME);
+        String volumeUUID = details.get(OntapStorageConstants.VOLUME_UUID);
+        String volumeName = details.get(OntapStorageConstants.VOLUME_NAME);
+
+        ExportPolicy policyRequest = 
createExportPolicyRequest(accessGroup,svmName,volumeName);
+        try {
+            ExportPolicy createdPolicy = createExportPolicy(svmName, 
policyRequest);
+            s_logger.info("ExportPolicy created: {}, now attaching this policy 
to storage pool volume", createdPolicy.getName());
+            assignExportPolicyToVolume(volumeUUID,createdPolicy.getName());
+            
storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), 
OntapStorageConstants.EXPORT_POLICY_ID, String.valueOf(createdPolicy.getId()), 
true);
+            
storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), 
OntapStorageConstants.EXPORT_POLICY_NAME, createdPolicy.getName(), true);
+            s_logger.info("Successfully assigned exportPolicy {} to volume 
{}", policyRequest.getName(), volumeName);
+            accessGroup.setPolicy(policyRequest);
+            return accessGroup;
+        }catch(Exception e){
+            s_logger.error("Exception occurred while creating access group: " 
+  e);
+            throw new CloudRuntimeException("Failed to create access group: " 
+ e);
+        }
+    }
+
+    @Override
+    public void deleteAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("deleteAccessGroup: Deleting export policy");
+
+        if (accessGroup == null) {
+            throw new CloudRuntimeException("deleteAccessGroup: Invalid 
accessGroup object - accessGroup is null");
+        }
+
+        PrimaryDataStoreInfo primaryDataStoreInfo = 
accessGroup.getPrimaryDataStoreInfo();
+        if (primaryDataStoreInfo == null) {
+            throw new CloudRuntimeException("deleteAccessGroup: 
PrimaryDataStoreInfo is null in accessGroup");
+        }
+        s_logger.info("deleteAccessGroup: Deleting export policy for the 
storage pool {}", primaryDataStoreInfo.getName());
+        try {
+            String authHeader = 
OntapStorageUtils.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+            String svmName = storage.getSvmName();
+            String exportPolicyName = 
primaryDataStoreInfo.getDetails().get(OntapStorageConstants.EXPORT_POLICY_NAME);
+            String exportPolicyId = 
primaryDataStoreInfo.getDetails().get(OntapStorageConstants.EXPORT_POLICY_ID);
+
+            try {
+                
nasFeignClient.deleteExportPolicyById(authHeader,exportPolicyId);
+                s_logger.info("deleteAccessGroup: Successfully deleted export 
policy '{}'", exportPolicyName);
+            } catch (Exception e) {
+                s_logger.error("deleteAccessGroup: Failed to delete export 
policy. Exception: {}", e.getMessage(), e);
+                throw new CloudRuntimeException("Failed to delete export 
policy: " + e.getMessage(), e);
+            }
+        } catch (Exception e) {
+            s_logger.error("deleteAccessGroup: Failed to delete export policy. 
Exception: {}", e.getMessage(), e);
+            throw new CloudRuntimeException("Failed to delete export policy: " 
+ e.getMessage(), e);
+        }
+    }
+
    // TODO(review): stub — access-group update is not implemented yet; returns null.
    @Override
    public AccessGroup updateAccessGroup(AccessGroup accessGroup) {
        return null;
    }
+
    // TODO(review): stub — access-group lookup is not implemented yet; returns null.
    @Override
    public AccessGroup getAccessGroup(Map<String, String> values) {
        return null;
    }
+
    // TODO(review): stub — enabling logical access is not implemented yet; returns null.
    @Override
    public Map<String, String> enableLogicalAccess(Map<String, String> values) {
        return null;
    }
+
    // TODO(review): stub — disabling logical access is currently a no-op.
    @Override
    public void disableLogicalAccess(Map<String, String> values) {
    }
+
    // TODO(review): stub — logical-access lookup is not implemented yet; returns null.
    @Override
    public Map<String, String> getLogicalAccess(Map<String, String> values) {
        return null;
    }
+
+
+    private ExportPolicy createExportPolicy(String svmName, ExportPolicy 
policy) {
+        s_logger.info("Creating export policy: {} for SVM: {}", policy, 
svmName);
+
+        try {
+            String authHeader = 
OntapStorageUtils.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+            nasFeignClient.createExportPolicy(authHeader,  policy);
+            OntapResponse<ExportPolicy> policiesResponse = null;
+            try {
+                Map<String, Object> queryParams = 
Map.of(OntapStorageConstants.NAME, policy.getName());
+                policiesResponse = 
nasFeignClient.getExportPolicyResponse(authHeader, queryParams);
+                if (policiesResponse == null || 
policiesResponse.getRecords().isEmpty()) {
+                    throw new CloudRuntimeException("Export policy " + 
policy.getName() + " was not created on ONTAP. " +
+                            "Received successful response but policy does not 
exist.");
+                }
+                s_logger.info("Export policy created and verified 
successfully: " + policy.getName());
+            } catch (FeignException e) {
+                s_logger.error("Failed to verify export policy creation: " + 
policy.getName(), e);
+                throw new CloudRuntimeException("Export policy creation 
verification failed: " + e.getMessage());
+            }
+            s_logger.info("Export policy created successfully with name {}", 
policy.getName());
+            return policiesResponse.getRecords().get(0);
+        } catch (FeignException e) {
+            s_logger.error("Failed to create export policy: {}", policy, e);
+            throw new CloudRuntimeException("Failed to create export policy: " 
+ e.getMessage());
+        } catch (Exception e) {
+            s_logger.error("Exception while creating export policy: {}", 
policy, e);
+            throw new CloudRuntimeException("Failed to create export policy: " 
+ e.getMessage());
+        }
+    }
+
+    private void deleteExportPolicy(String svmName, String policyName) {

Review Comment:
   Yes, this isn't being used, will remove it.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to