bhattmanish98 commented on code in PR #8137:
URL: https://github.com/apache/hadoop/pull/8137#discussion_r2647722292
##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java:
##########
@@ -281,42 +303,84 @@ private AbfsClient(final URL baseUrl,
         new ThreadFactoryBuilder().setNameFormat("AbfsClient Lease Ops").setDaemon(true).build();
     this.executorService = MoreExecutors.listeningDecorator(
         HadoopExecutors.newScheduledThreadPool(this.abfsConfiguration.getNumLeaseThreads(), tf));
-    this.metricFormat = abfsConfiguration.getMetricFormat();
+
     this.isMetricCollectionEnabled.set(abfsConfiguration.isMetricsCollectionEnabled());
     this.isMetricCollectionStopped = new AtomicBoolean(false);
     this.metricAnalysisPeriod = abfsConfiguration.getMetricAnalysisTimeout();
     this.metricIdlePeriod = abfsConfiguration.getMetricIdleTimeout();
-    if (StringUtils.isNotEmpty(metricFormat.toString())) {
-      String metricAccountName = abfsConfiguration.getMetricAccount();
-      String metricAccountKey = abfsConfiguration.getMetricAccountKey();
-      if (StringUtils.isNotEmpty(metricAccountName) && StringUtils.isNotEmpty(metricAccountKey)) {
-        isMetricCollectionEnabled = true;
-        abfsCounters.initializeMetrics(metricFormat);
-        int dotIndex = metricAccountName.indexOf(AbfsHttpConstants.DOT);
-        if (dotIndex <= 0) {
-          throw new InvalidUriException(
-              metricAccountName + " - account name is not fully qualified.");
+    if (isMetricCollectionEnabled()) {
+      try {
+        String metricAccountName = abfsConfiguration.getMetricAccount();
+        String metricAccountKey = abfsConfiguration.getMetricAccountKey();
+        this.metricFormat = abfsConfiguration.getMetricFormat();
+        abfsCounters.initializeMetrics(metricFormat, getAbfsConfiguration());
+        if (isNotEmpty(metricAccountName) && isNotEmpty(
+            metricAccountKey)) {
+          int dotIndex = metricAccountName.indexOf(AbfsHttpConstants.DOT);
+          if (dotIndex <= 0) {
+            throw new InvalidUriException(
+                metricAccountName + " - account name is not fully qualified.");
+          }
+          try {
+            metricSharedkeyCredentials = new SharedKeyCredentials(
+                metricAccountName.substring(0, dotIndex),
+                metricAccountKey);
+            hasSeparateMetricAccount = true;
+            setMetricsUrl(metricAccountName.startsWith(HTTPS_SCHEME)
+                ? metricAccountName : HTTPS_SCHEME + COLON
+                + FORWARD_SLASH + FORWARD_SLASH + metricAccountName);
+          } catch (IllegalArgumentException e) {
+            throw new IOException(
+                "Exception while initializing metric credentials ", e);
+          }
+        } else {
+          setMetricsUrl(baseUrlString.substring(0, indexLastForwardSlash + 1));
         }
-        try {
-          metricSharedkeyCredentials = new SharedKeyCredentials(
-              metricAccountName.substring(0, dotIndex),
-              metricAccountKey);
-        } catch (IllegalArgumentException e) {
-          throw new IOException("Exception while initializing metric credentials ", e);
+
+        // register the client to Aggregated Metrics Manager
+        this.aggregateMetricsManager.registerClient(accountName, this);
+
+        // Metrics emitter scheduler
+        this.metricsEmitScheduler
+            = Executors.newSingleThreadScheduledExecutor();
+        // run every 1 minute to check the metrics count
+        this.metricsEmitScheduler.scheduleAtFixedRate(
Review Comment:
Yes, this is as per the design: each file system will emit its metrics to the
manager class at a regular interval as long as it has not been closed. The
singleton manager class will make the actual API call to send the collected
metrics.
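
For readers following along, here is a minimal, self-contained sketch of that flow. It is only an illustration of the pattern under discussion, not the PR's actual classes: the names `MetricsAggregatorSketch`, `Client`, `emit` and `flush` are hypothetical, and only the ideas of `registerClient` and a per-client `scheduleAtFixedRate` come from the diff above.

```java
import java.io.Closeable;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;

/**
 * Hypothetical sketch of the design described above: clients register with a
 * singleton manager and push metric counts to it on a schedule; only the
 * manager performs the outbound call that actually ships the metrics.
 */
public final class MetricsAggregatorSketch {

  private static final MetricsAggregatorSketch INSTANCE = new MetricsAggregatorSketch();

  // accountName -> metric counts accumulated since the last flush
  private final Map<String, LongAdder> pendingMetrics = new ConcurrentHashMap<>();

  private MetricsAggregatorSketch() {
  }

  public static MetricsAggregatorSketch getInstance() {
    return INSTANCE;
  }

  /** Called once per client, mirroring registerClient(accountName, this) in the diff. */
  public void registerClient(String accountName) {
    pendingMetrics.putIfAbsent(accountName, new LongAdder());
  }

  /** Open clients call this at a regular interval until they are closed. */
  public void emit(String accountName, long metricCount) {
    pendingMetrics.computeIfAbsent(accountName, k -> new LongAdder()).add(metricCount);
  }

  /** Only the manager talks to the metrics backend; a print stands in for the API call. */
  public void flush() {
    pendingMetrics.forEach((account, counter) -> {
      long value = counter.sumThenReset();
      if (value > 0) {
        System.out.println("POST metrics for " + account + ": " + value);
      }
    });
  }

  /** A client that schedules its own periodic emission, as the constructor in the diff does. */
  public static final class Client implements Closeable {
    private final String accountName;
    private final ScheduledExecutorService metricsEmitScheduler =
        Executors.newSingleThreadScheduledExecutor();

    public Client(String accountName) {
      this.accountName = accountName;
      getInstance().registerClient(accountName);
      // run every minute; a closed client stops emitting because its scheduler is shut down
      metricsEmitScheduler.scheduleAtFixedRate(
          () -> getInstance().emit(this.accountName, 1L), 1, 1, TimeUnit.MINUTES);
    }

    @Override
    public void close() {
      metricsEmitScheduler.shutdownNow();
    }
  }
}
```

The point of the split is that any number of open file system instances can push counts cheaply and concurrently, while a single place owns the comparatively expensive outbound call.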