This is an automated email from the ASF dual-hosted git repository.

dahn pushed a commit to branch 4.18
in repository https://gitbox.apache.org/repos/asf/cloudstack.git


The following commit(s) were added to refs/heads/4.18 by this push:
     new f32a63be602 Storage and volumes statistics tasks for StorPool primary storage (#7404)
f32a63be602 is described below

commit f32a63be602e4c3fcbb1f18d03e7b548f55ea533
Author: slavkap <[email protected]>
AuthorDate: Wed Jul 19 11:48:36 2023 +0300

    Storage and volumes statistics tasks for StorPool primary storage (#7404)
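
In short, the change adds a scheduled collector that periodically queries the StorPool API for space usage and caches the results in static maps, which StorPoolPrimaryDataStoreDriver then serves from getStorageStats() and getVolumeStats(). Below is a minimal, self-contained sketch of that caching pattern; the map and task shape mirror the diff, while the long[] pair, the fetchVolumesSpace() helper and the 3600-second period are stand-ins for CloudStack's Pair type, the StorPool API call and the new storpool.volumes.stats.interval setting.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class StatsCacheSketch {
        // Cache shared between the scheduled producer and the driver-side reader,
        // keyed by volume name; the long[] holds {usedBytes, sizeBytes}.
        static final Map<String, long[]> volumesStats = new ConcurrentHashMap<>();

        public static void main(String[] args) {
            ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
            // Producer: refresh the cache on a fixed schedule, as the diff does with
            // storpool.volumes.stats.interval (3600 s is its default).
            executor.scheduleAtFixedRate(() -> {
                Map<String, long[]> fresh = fetchVolumesSpace(); // placeholder for the StorPool API call
                volumesStats.clear();
                volumesStats.putAll(fresh);
            }, 0, 3600, TimeUnit.SECONDS);

            // Consumer: what a getVolumeStats()-style lookup amounts to.
            long[] stats = volumesStats.get("volume-1234");
            System.out.println(stats == null ? "no stats cached yet" : stats[0] + "/" + stats[1]);
            executor.shutdown();
        }

        // Hypothetical stand-in for StorPoolUtil.volumesSpace() plus JSON parsing.
        private static Map<String, long[]> fetchVolumesSpace() {
            return Map.of("volume-1234", new long[] {1_073_741_824L, 10_737_418_240L});
        }
    }
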
---
 .../driver/StorPoolPrimaryDataStoreDriver.java     |  42 ++++-
 .../datastore/driver/StorPoolStatsCollector.java   | 188 +++++++++++++++++++++
 .../storage/datastore/util/StorPoolUtil.java       |  12 ++
 .../snapshot/StorPoolConfigurationManager.java     |  12 +-
 .../spring-storage-volume-storpool-context.xml     |   3 +
 5 files changed, 254 insertions(+), 3 deletions(-)

diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java
index 896e12a3bc3..22ad73a118a 100644
--- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java
@@ -82,6 +82,7 @@ import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
@@ -97,6 +98,7 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO;
 import org.apache.cloudstack.storage.to.VolumeObjectTO;
 import org.apache.cloudstack.storage.volume.VolumeObject;
 import org.apache.commons.collections4.CollectionUtils;
+import org.apache.commons.collections4.MapUtils;
 import org.apache.log4j.Logger;
 
 import javax.inject.Inject;
@@ -1047,18 +1049,54 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     }
 
     public boolean canProvideStorageStats() {
-        return false;
+        return StorPoolConfigurationManager.StorageStatsInterval.value() > 0;
     }
 
     public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
+        if (storagePool == null) {
+            return null;
+        }
+        Map<Long, Map<String, Pair<Long, Long>>> templatesStats = StorPoolStatsCollector.templatesStats;
+        if (MapUtils.isNotEmpty(templatesStats) && templatesStats.containsKey(storagePool.getDataCenterId())) {
+            Map<String, Pair<Long, Long>> storageStats = templatesStats.get(storagePool.getDataCenterId());
+            StoragePoolDetailVO templateName = storagePoolDetailsDao.findDetail(storagePool.getId(), StorPoolUtil.SP_TEMPLATE);
+            if (templateName != null && storageStats.containsKey(templateName.getValue())) {
+                Pair<Long, Long> stats = storageStats.get(templateName.getValue());
+                if (stats.first() != storagePool.getCapacityBytes()) {
+                    primaryStoreDao.updateCapacityBytes(storagePool.getId(), stats.first());
+                }
+                return stats;
+            }
+        }
         return null;
     }
 
     public boolean canProvideVolumeStats() {
-        return false;
+        return StorPoolConfigurationManager.VolumesStatsInterval.value() > 0;
     }
 
     public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
+
+        if (volumeId == null) {
+            return null;
+        }
+
+        Map<String, Pair<Long, Long>> volumesStats = StorPoolStatsCollector.volumesStats;
+        if (MapUtils.isNotEmpty(volumesStats)) {
+            Pair<Long, Long> volumeStats = volumesStats.get(StorPoolStorageAdaptor.getVolumeNameFromPath(volumeId, true));
+            if (volumeStats != null) {
+                return volumeStats;
+            }
+        } else {
+            List<VolumeVO> volumes = volumeDao.findByPoolId(storagePool.getId());
+            for (VolumeVO volume : volumes) {
+                if (volume.getPath() != null && volume.getPath().equals(volumeId)) {
+                    long size = volume.getSize();
+                    StorPoolUtil.spLog("Volume [%s] doesn't have any statistics, returning its size [%s]", volumeId, size);
+                    return new Pair<>(size, size);
+                }
+            }
+        }
         return null;
     }
 
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java
new file mode 100644
index 00000000000..92a398934d0
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.datastore.driver;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil;
+import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.log4j.Logger;
+
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.Pair;
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+
+public class StorPoolStatsCollector extends ManagerBase {
+
+    private static Logger log = Logger.getLogger(StorPoolStatsCollector.class);
+
+    @Inject
+    private PrimaryDataStoreDao storagePoolDao;
+    @Inject
+    private StoragePoolDetailsDao storagePoolDetailsDao;
+    @Inject
+    private ConfigurationDao configurationDao;
+
+    private ScheduledExecutorService executor;
+
+    static volatile Map<String, Pair<Long, Long>> volumesStats = new ConcurrentHashMap<>();
+    static volatile Map<Long, Map<String, Pair<Long, Long>>> templatesStats = new ConcurrentHashMap<>();
+
+
+    enum StorPoolObject {
+        VOLUME, TEMPLATE;
+    }
+
+    @Override
+    public boolean start() {
+        List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
+        if (CollectionUtils.isNotEmpty(spPools)) {
+            executor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("StorPoolStatsCollector"));
+            long storageStatsInterval = NumbersUtil.parseLong(configurationDao.getValue("storage.stats.interval"), 60000L);
+            long volumeStatsInterval = NumbersUtil.parseLong(configurationDao.getValue("volume.stats.interval"), 60000L);
+
+            if (StorPoolConfigurationManager.VolumesStatsInterval.value() > 0 && volumeStatsInterval > 0) {
+                executor.scheduleAtFixedRate(new StorPoolVolumeStatsMonitorTask(), 120, StorPoolConfigurationManager.VolumesStatsInterval.value(), TimeUnit.SECONDS);
+            }
+            if (StorPoolConfigurationManager.StorageStatsInterval.value() > 0 && storageStatsInterval > 0) {
+                executor.scheduleAtFixedRate(new StorPoolStorageStatsMonitorTask(), 120, StorPoolConfigurationManager.StorageStatsInterval.value(), TimeUnit.SECONDS);
+            }
+            }
+        }
+
+        return true;
+    }
+
+    class StorPoolVolumeStatsMonitorTask implements Runnable {
+
+        @Override
+        public void run() {
+            List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
+            if (CollectionUtils.isNotEmpty(spPools)) {
+                volumesStats.clear();
+
+                log.debug("Collecting StorPool volumes used space");
+                Map<Long, StoragePoolVO> onePoolforZone = new HashMap<>();
+                for (StoragePoolVO storagePoolVO : spPools) {
+                    onePoolforZone.put(storagePoolVO.getDataCenterId(), storagePoolVO);
+                }
+                for (StoragePoolVO storagePool : onePoolforZone.values()) {
+                    try {
+                        log.debug(String.format("Collecting volumes statistics for zone [%s]", storagePool.getDataCenterId()));
+                        JsonArray arr = StorPoolUtil.volumesSpace(StorPoolUtil.getSpConnection(storagePool.getUuid(),
+                                storagePool.getId(), storagePoolDetailsDao, storagePoolDao));
+                        volumesStats.putAll(getClusterVolumeOrTemplateSpace(arr, StorPoolObject.VOLUME));
+                    } catch (Exception e) {
+                        log.debug(String.format("Could not collect StorPool volumes statistics due to %s", e.getMessage()));
+                    }
+                    }
+                }
+            }
+        }
+    }
+
+    class StorPoolStorageStatsMonitorTask implements Runnable {
+
+        @Override
+        public void run() {
+            List<StoragePoolVO> spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME);
+            if (CollectionUtils.isNotEmpty(spPools)) {
+                templatesStats.clear();
+
+                Map<Long, StoragePoolVO> onePoolforZone = new HashMap<>();
+                for (StoragePoolVO storagePoolVO : spPools) {
+                    onePoolforZone.put(storagePoolVO.getDataCenterId(), storagePoolVO);
+                }
+                for (StoragePoolVO storagePool : onePoolforZone.values()) {
+                    try {
+                        log.debug(String.format("Collecting templates statistics for zone [%s]", storagePool.getDataCenterId()));
+                        JsonArray arr = StorPoolUtil.templatesStats(StorPoolUtil.getSpConnection(storagePool.getUuid(),
+                                storagePool.getId(), storagePoolDetailsDao, storagePoolDao));
+                        templatesStats.put(storagePool.getDataCenterId(), getClusterVolumeOrTemplateSpace(arr, StorPoolObject.TEMPLATE));
+                    } catch (Exception e) {
+                        log.debug(String.format("Could not collect StorPool templates statistics due to %s", e.getMessage()));
+                    }
+                    }
+                }
+            }
+        }
+    }
+
+    private Map<String, Pair<Long, Long>> getClusterVolumeOrTemplateSpace(JsonArray arr, StorPoolObject spObject) {
+        Map<String, Pair<Long, Long>> map = new HashMap<>();
+        for (JsonElement jsonElement : arr) {
+            JsonObject name = jsonElement.getAsJsonObject().getAsJsonObject("response");
+            if (name != null) {
+                JsonArray data = name.getAsJsonObject().getAsJsonArray("data");
+                if (StorPoolObject.VOLUME == spObject) {
+                    map.putAll(getStatsForVolumes(data));
+                } else if (StorPoolObject.TEMPLATE == spObject) {
+                    getClusterStats(data, map);
+                }
+            } else if (StorPoolObject.TEMPLATE == spObject) {
+                return map;
+            }
+        }
+        return map;
+    }
+
+    private Map<String, Pair<Long, Long>> getStatsForVolumes(JsonArray arr) {
+        Map<String, Pair<Long, Long>> map = new HashMap<>();
+        for (int i = 0; i < arr.size(); i++) {
+            String name = arr.get(i).getAsJsonObject().get("name").getAsString();
+            if (!name.startsWith("*") && !name.contains("@")) {
+                Long spaceUsed = arr.get(i).getAsJsonObject().get("spaceUsed").getAsLong();
+                Long size = arr.get(i).getAsJsonObject().get("size").getAsLong();
+                map.put(name, new Pair<>(spaceUsed, size));
+            }
+        }
+        return map;
+    }
+
+    private void getClusterStats(JsonArray data, Map<String, Pair<Long, Long>> map) {
+        for (JsonElement dat : data) {
+            long capacity = dat.getAsJsonObject().get("stored").getAsJsonObject().get("capacity").getAsLong();
+            long free = dat.getAsJsonObject().get("stored").getAsJsonObject().get("free").getAsLong();
+            long used = capacity - free;
+            String templateName = dat.getAsJsonObject().get("name").getAsString();
+            if (!map.containsKey(templateName)) {
+                map.put(templateName, new Pair<>(capacity, used));
+            } else {
+                Pair<Long, Long> template = map.get(templateName);
+                template.first(template.first() + capacity);
+                template.second(template.second() + used);
+                map.put(templateName, template);
+            }
+        }
+    }
+}
\ No newline at end of file
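
For readers who have not used the StorPool multi-cluster API: getClusterVolumeOrTemplateSpace() above expects each element of the returned "clusters" array to hold a "response" object with a "data" array, and getStatsForVolumes() reads "name", "spaceUsed" and "size" from each entry. The Gson sketch below walks a hand-written payload of that shape; the payload is illustrative only, modeled on the field names used in the diff rather than on captured StorPool output.

    import com.google.gson.Gson;
    import com.google.gson.JsonArray;
    import com.google.gson.JsonElement;
    import com.google.gson.JsonObject;

    public class StorPoolJsonShapeSketch {
        public static void main(String[] args) {
            // Illustrative payload: the field names ("response", "data", "name",
            // "spaceUsed", "size") come from the parsing code, not from captured API output.
            String clustersJson = "[{\"response\": {\"data\": ["
                    + "{\"name\": \"volume-1234\", \"spaceUsed\": 1073741824, \"size\": 10737418240},"
                    + "{\"name\": \"*internal\", \"spaceUsed\": 0, \"size\": 0}"
                    + "]}}]";

            JsonArray clusters = new Gson().fromJson(clustersJson, JsonArray.class);
            for (JsonElement cluster : clusters) {
                JsonObject response = cluster.getAsJsonObject().getAsJsonObject("response");
                if (response == null) {
                    continue; // a cluster entry without a "response" object is skipped
                }
                JsonArray data = response.getAsJsonArray("data");
                for (JsonElement volume : data) {
                    JsonObject obj = volume.getAsJsonObject();
                    String name = obj.get("name").getAsString();
                    // Same filter as getStatsForVolumes(): skip names starting with "*" or containing "@".
                    if (!name.startsWith("*") && !name.contains("@")) {
                        long spaceUsed = obj.get("spaceUsed").getAsLong();
                        long size = obj.get("size").getAsLong();
                        System.out.printf("%s used=%d size=%d%n", name, spaceUsed, size);
                    }
                }
            }
        }
    }
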
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java
index f859a46ba36..a7ff6268b5a 100644
--- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java
@@ -406,6 +406,18 @@ public class StorPoolUtil {
         return data;
     }
 
+    public static JsonArray volumesSpace(SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/AllClusters/VolumesSpace", conn);
+        JsonObject obj = resp.fullJson.getAsJsonObject();
+        return obj.getAsJsonObject("data").getAsJsonArray("clusters");
+    }
+
+    public static JsonArray templatesStats(SpConnectionDesc conn) {
+        SpApiResponse resp = GET("MultiCluster/AllClusters/VolumeTemplatesStatus", conn);
+        JsonObject obj = resp.fullJson.getAsJsonObject();
+        return obj.getAsJsonObject("data").getAsJsonArray("clusters");
+    }
+
     private static boolean objectExists(SpApiError err) {
         if (!err.getName().equals("objectDoesNotExist")) {
             throw new CloudRuntimeException(err.getDescr());
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java
index 782d8133813..dcb2b226467 100644
--- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java
@@ -34,6 +34,16 @@ public class StorPoolConfigurationManager implements Configurable {
     public static final ConfigKey<String> AlternativeEndpoint = new ConfigKey<String>(String.class, "sp.alternative.endpoint", "Advanced", "",
             "Used for StorPool primary storage for an alternative endpoint. Structure of the endpoint is - SP_API_HTTP=address:port;SP_AUTH_TOKEN=token;SP_TEMPLATE=template_name", true, ConfigKey.Scope.StoragePool, null);
 
+    public static final ConfigKey<Integer> VolumesStatsInterval = new ConfigKey<>("Advanced", Integer.class,
+            "storpool.volumes.stats.interval", "3600",
+            "The interval in seconds to get StorPool volumes statistics",
+            false);
+
+    public static final ConfigKey<Integer> StorageStatsInterval = new ConfigKey<>("Advanced", Integer.class,
+            "storpool.storage.stats.interval", "3600",
+            "The interval in seconds to get StorPool template statistics",
+            false);
+
     @Override
     public String getConfigComponentName() {
         return StorPoolConfigurationManager.class.getSimpleName();
@@ -41,6 +51,6 @@ public class StorPoolConfigurationManager implements Configurable {
 
     @Override
     public ConfigKey<?>[] getConfigKeys() {
-        return new ConfigKey<?>[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint };
+        return new ConfigKey<?>[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint, VolumesStatsInterval, StorageStatsInterval };
     }
 }
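
Both new settings default to 3600 seconds. Setting one to 0 or a negative value disables that path: canProvideStorageStats()/canProvideVolumeStats() return false and StorPoolStatsCollector.start() does not schedule the task, which is also skipped when the matching global setting (storage.stats.interval or volume.stats.interval) is not positive. A small sketch of that gating logic follows, with a hypothetical settings map standing in for the ConfigKey and ConfigurationDao lookups.

    import java.util.Map;

    public class StatsIntervalGateSketch {
        public static void main(String[] args) {
            // Hypothetical settings snapshot; in CloudStack these values come from the
            // ConfigKeys added by the diff and from the existing global configuration.
            Map<String, String> settings = Map.of(
                    "storpool.volumes.stats.interval", "3600",
                    "storpool.storage.stats.interval", "0",
                    "volume.stats.interval", "60000",
                    "storage.stats.interval", "60000");

            System.out.println("volume stats task scheduled: "
                    + gate(settings, "storpool.volumes.stats.interval", "volume.stats.interval"));
            System.out.println("storage stats task scheduled: "
                    + gate(settings, "storpool.storage.stats.interval", "storage.stats.interval"));
        }

        // Mirrors the gate in StorPoolStatsCollector.start(): the StorPool-specific
        // interval and the matching global interval must both be positive.
        private static boolean gate(Map<String, String> settings, String pluginKey, String globalKey) {
            long pluginInterval = Long.parseLong(settings.get(pluginKey));
            long globalInterval = Long.parseLong(settings.get(globalKey));
            return pluginInterval > 0 && globalInterval > 0;
        }
    }
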
diff --git a/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml b/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml
index cf1db3a8bf2..6451fc8fd39 100644
--- a/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml
+++ b/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml
@@ -35,4 +35,7 @@
 
        <bean id="cleanupTags"
                class="org.apache.cloudstack.storage.collector.StorPoolAbandonObjectsCollector" />
+
+    <bean id="statistics"
+        class="org.apache.cloudstack.storage.datastore.driver.StorPoolStatsCollector" />
 </beans>
