clintropolis commented on a change in pull request #9965:
URL: https://github.com/apache/druid/pull/9965#discussion_r439179110
##########
File path:
server/src/main/java/org/apache/druid/server/http/DataSourcesResource.java
##########
@@ -391,6 +396,123 @@ public Response getServedSegmentsInInterval(
return getServedSegmentsInInterval(dataSourceName, full != null,
theInterval::contains);
}
+ @GET
+ @Path("/{dataSourceName}/loadstatus")
+ @Produces(MediaType.APPLICATION_JSON)
+ @ResourceFilters(DatasourceResourceFilter.class)
+ public Response getDatasourceLoadstatus(
+ @PathParam("dataSourceName") String dataSourceName,
+ @QueryParam("interval") @Nullable final String interval,
+ @QueryParam("forceMetadataRefresh") @Nullable final Boolean
forceMetadataRefresh,
+ @QueryParam("simple") @Nullable final String simple,
+ @QueryParam("full") @Nullable final String full
+ )
+ {
+ final Interval theInterval;
+ if (interval == null) {
+ long defaultIntervalOffset = 14 * 24 * 60 * 60 * 1000;
+ long currentTimeInMs = System.currentTimeMillis();
+ theInterval = Intervals.utc(currentTimeInMs - defaultIntervalOffset,
currentTimeInMs);
+ } else {
+ theInterval = Intervals.of(interval.replace('_', '/'));
+ }
+
+ boolean requiresMetadataStorePoll = forceMetadataRefresh == null ? true :
forceMetadataRefresh;
Review comment:
I really don't think this should default to `true`, since it is a heavy
operation — and also because the docs recommend polling the API with the
non-default option to determine when your segments are all available, and
only calling with the default option once.
##########
File path:
server/src/main/java/org/apache/druid/server/http/DataSourcesResource.java
##########
@@ -391,6 +396,123 @@ public Response getServedSegmentsInInterval(
return getServedSegmentsInInterval(dataSourceName, full != null,
theInterval::contains);
}
+ @GET
+ @Path("/{dataSourceName}/loadstatus")
+ @Produces(MediaType.APPLICATION_JSON)
+ @ResourceFilters(DatasourceResourceFilter.class)
+ public Response getDatasourceLoadstatus(
+ @PathParam("dataSourceName") String dataSourceName,
+ @QueryParam("interval") @Nullable final String interval,
+ @QueryParam("forceMetadataRefresh") @Nullable final Boolean
forceMetadataRefresh,
+ @QueryParam("simple") @Nullable final String simple,
+ @QueryParam("full") @Nullable final String full
+ )
+ {
+ final Interval theInterval;
+ if (interval == null) {
+ long defaultIntervalOffset = 14 * 24 * 60 * 60 * 1000;
+ long currentTimeInMs = System.currentTimeMillis();
+ theInterval = Intervals.utc(currentTimeInMs - defaultIntervalOffset,
currentTimeInMs);
+ } else {
+ theInterval = Intervals.of(interval.replace('_', '/'));
+ }
+
+ boolean requiresMetadataStorePoll = forceMetadataRefresh == null ? true :
forceMetadataRefresh;
+
+ Optional<Iterable<DataSegment>> segments =
segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
+ dataSourceName,
+ theInterval,
+ requiresMetadataStorePoll
+ );
+
+ if (!segments.isPresent()) {
+ return logAndCreateDataSourceNotFoundResponse(dataSourceName);
+ }
+
+ if (simple != null) {
+ // Calculate resposne for simple mode
+ Map<SegmentId, SegmentLoadInfo> segmentLoadInfos =
serverInventoryView.getSegmentLoadInfos();
+ int numUnloadedSegments = 0;
+ for (DataSegment segment : segments.get()) {
+ if (!segmentLoadInfos.containsKey(segment.getId())) {
+ numUnloadedSegments++;
+ }
+ }
+ return Response.ok(
+ ImmutableMap.of(
+ dataSourceName,
+ numUnloadedSegments
+ )
+ ).build();
+ } else if (full != null) {
+ // Calculate resposne for full mode
Review comment:
resposne -> response
##########
File path:
server/src/main/java/org/apache/druid/server/http/DataSourcesResource.java
##########
@@ -391,6 +396,123 @@ public Response getServedSegmentsInInterval(
return getServedSegmentsInInterval(dataSourceName, full != null,
theInterval::contains);
}
+ @GET
+ @Path("/{dataSourceName}/loadstatus")
+ @Produces(MediaType.APPLICATION_JSON)
+ @ResourceFilters(DatasourceResourceFilter.class)
+ public Response getDatasourceLoadstatus(
+ @PathParam("dataSourceName") String dataSourceName,
+ @QueryParam("interval") @Nullable final String interval,
+ @QueryParam("forceMetadataRefresh") @Nullable final Boolean
forceMetadataRefresh,
+ @QueryParam("simple") @Nullable final String simple,
+ @QueryParam("full") @Nullable final String full
+ )
+ {
+ final Interval theInterval;
+ if (interval == null) {
+ long defaultIntervalOffset = 14 * 24 * 60 * 60 * 1000;
+ long currentTimeInMs = System.currentTimeMillis();
+ theInterval = Intervals.utc(currentTimeInMs - defaultIntervalOffset,
currentTimeInMs);
+ } else {
+ theInterval = Intervals.of(interval.replace('_', '/'));
+ }
+
+ boolean requiresMetadataStorePoll = forceMetadataRefresh == null ? true :
forceMetadataRefresh;
+
+ Optional<Iterable<DataSegment>> segments =
segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
+ dataSourceName,
+ theInterval,
+ requiresMetadataStorePoll
+ );
+
+ if (!segments.isPresent()) {
+ return logAndCreateDataSourceNotFoundResponse(dataSourceName);
+ }
+
+ if (simple != null) {
+ // Calculate resposne for simple mode
+ Map<SegmentId, SegmentLoadInfo> segmentLoadInfos =
serverInventoryView.getSegmentLoadInfos();
+ int numUnloadedSegments = 0;
+ for (DataSegment segment : segments.get()) {
+ if (!segmentLoadInfos.containsKey(segment.getId())) {
+ numUnloadedSegments++;
+ }
+ }
+ return Response.ok(
+ ImmutableMap.of(
+ dataSourceName,
+ numUnloadedSegments
+ )
+ ).build();
+ } else if (full != null) {
+ // Calculate resposne for full mode
+ final Map<String, Object2LongMap<String>>
underReplicationCountsPerDataSourcePerTier = new HashMap<>();
+ final List<Rule> rules =
metadataRuleManager.getRulesWithDefault(dataSourceName);
+ final Table<SegmentId, String, Integer> segmentsInCluster =
HashBasedTable.create();
+ final DateTime now = DateTimes.nowUtc();
+
+ for (DataSegment segment : segments.get()) {
+ for (DruidServer druidServer : serverInventoryView.getInventory()) {
+ String tier = druidServer.getTier();
+ SegmentId segmentId = segment.getId();
+ DruidDataSource druidDataSource =
druidServer.getDataSource(dataSourceName);
+ if (druidDataSource != null && druidDataSource.getSegment(segmentId)
!= null) {
+ Integer numReplicants = segmentsInCluster.get(segmentId, tier);
+ if (numReplicants == null) {
+ numReplicants = 0;
+ }
+ segmentsInCluster.put(segmentId, tier, numReplicants + 1);
+ }
+ }
+ }
+ for (DataSegment segment : segments.get()) {
+ for (final Rule rule : rules) {
+ if (!(rule instanceof LoadRule && rule.appliesTo(segment, now))) {
+ continue;
+ }
+ ((LoadRule) rule)
+ .getTieredReplicants()
+ .forEach((final String tier, final Integer ruleReplicants) -> {
+ Integer currentReplicantsRetVal =
segmentsInCluster.get(segment.getId(), tier);
+ int currentReplicants = currentReplicantsRetVal == null ? 0 :
currentReplicantsRetVal;
+ Object2LongMap<String> underReplicationPerDataSource =
underReplicationCountsPerDataSourcePerTier
+ .computeIfAbsent(tier, ignored -> new
Object2LongOpenHashMap<>());
+ ((Object2LongOpenHashMap<String>)
underReplicationPerDataSource)
+ .addTo(dataSourceName, Math.max(ruleReplicants -
currentReplicants, 0));
+ });
+ break; // only the first matching rule applies
+ }
+ }
+ return Response.ok(underReplicationCountsPerDataSourcePerTier).build();
+ } else {
+ // Calculate resposne for default mode
+ Map<SegmentId, SegmentLoadInfo> segmentLoadInfos =
serverInventoryView.getSegmentLoadInfos();
+ int numUsedSegments = 0;
Review comment:
super nit: it would probably be more consistent to match the
terminology that appears in the system segments table, which uses 'published'
and 'available' in place of used and loaded:
* `numUsedSegments` -> `numPublishedSegments`
* `numUnloadedSegments` -> `numUnavailableSegments`
##########
File path:
server/src/main/java/org/apache/druid/server/http/DataSourcesResource.java
##########
@@ -391,6 +396,123 @@ public Response getServedSegmentsInInterval(
return getServedSegmentsInInterval(dataSourceName, full != null,
theInterval::contains);
}
+ @GET
+ @Path("/{dataSourceName}/loadstatus")
+ @Produces(MediaType.APPLICATION_JSON)
+ @ResourceFilters(DatasourceResourceFilter.class)
+ public Response getDatasourceLoadstatus(
+ @PathParam("dataSourceName") String dataSourceName,
+ @QueryParam("interval") @Nullable final String interval,
+ @QueryParam("forceMetadataRefresh") @Nullable final Boolean
forceMetadataRefresh,
+ @QueryParam("simple") @Nullable final String simple,
+ @QueryParam("full") @Nullable final String full
+ )
+ {
+ final Interval theInterval;
+ if (interval == null) {
+ long defaultIntervalOffset = 14 * 24 * 60 * 60 * 1000;
+ long currentTimeInMs = System.currentTimeMillis();
+ theInterval = Intervals.utc(currentTimeInMs - defaultIntervalOffset,
currentTimeInMs);
+ } else {
+ theInterval = Intervals.of(interval.replace('_', '/'));
+ }
+
+ boolean requiresMetadataStorePoll = forceMetadataRefresh == null ? true :
forceMetadataRefresh;
+
+ Optional<Iterable<DataSegment>> segments =
segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
+ dataSourceName,
+ theInterval,
+ requiresMetadataStorePoll
+ );
+
+ if (!segments.isPresent()) {
+ return logAndCreateDataSourceNotFoundResponse(dataSourceName);
+ }
+
+ if (simple != null) {
Review comment:
It seems like there is quite a lot of logic in this API entry-point
method; it would be worth breaking each of these blocks out into a dedicated
method for each response type so it's a bit easier to follow.
##########
File path:
server/src/main/java/org/apache/druid/server/http/DataSourcesResource.java
##########
@@ -391,6 +396,123 @@ public Response getServedSegmentsInInterval(
return getServedSegmentsInInterval(dataSourceName, full != null,
theInterval::contains);
}
+ @GET
+ @Path("/{dataSourceName}/loadstatus")
+ @Produces(MediaType.APPLICATION_JSON)
+ @ResourceFilters(DatasourceResourceFilter.class)
+ public Response getDatasourceLoadstatus(
+ @PathParam("dataSourceName") String dataSourceName,
+ @QueryParam("interval") @Nullable final String interval,
+ @QueryParam("forceMetadataRefresh") @Nullable final Boolean
forceMetadataRefresh,
+ @QueryParam("simple") @Nullable final String simple,
+ @QueryParam("full") @Nullable final String full
+ )
+ {
+ final Interval theInterval;
+ if (interval == null) {
+ long defaultIntervalOffset = 14 * 24 * 60 * 60 * 1000;
+ long currentTimeInMs = System.currentTimeMillis();
+ theInterval = Intervals.utc(currentTimeInMs - defaultIntervalOffset,
currentTimeInMs);
+ } else {
+ theInterval = Intervals.of(interval.replace('_', '/'));
+ }
+
+ boolean requiresMetadataStorePoll = forceMetadataRefresh == null ? true :
forceMetadataRefresh;
+
+ Optional<Iterable<DataSegment>> segments =
segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
+ dataSourceName,
+ theInterval,
+ requiresMetadataStorePoll
+ );
+
+ if (!segments.isPresent()) {
+ return logAndCreateDataSourceNotFoundResponse(dataSourceName);
+ }
+
+ if (simple != null) {
+ // Calculate resposne for simple mode
+ Map<SegmentId, SegmentLoadInfo> segmentLoadInfos =
serverInventoryView.getSegmentLoadInfos();
+ int numUnloadedSegments = 0;
+ for (DataSegment segment : segments.get()) {
+ if (!segmentLoadInfos.containsKey(segment.getId())) {
+ numUnloadedSegments++;
+ }
+ }
+ return Response.ok(
+ ImmutableMap.of(
+ dataSourceName,
+ numUnloadedSegments
+ )
+ ).build();
+ } else if (full != null) {
+ // Calculate resposne for full mode
+ final Map<String, Object2LongMap<String>>
underReplicationCountsPerDataSourcePerTier = new HashMap<>();
+ final List<Rule> rules =
metadataRuleManager.getRulesWithDefault(dataSourceName);
+ final Table<SegmentId, String, Integer> segmentsInCluster =
HashBasedTable.create();
+ final DateTime now = DateTimes.nowUtc();
+
+ for (DataSegment segment : segments.get()) {
+ for (DruidServer druidServer : serverInventoryView.getInventory()) {
+ String tier = druidServer.getTier();
+ SegmentId segmentId = segment.getId();
+ DruidDataSource druidDataSource =
druidServer.getDataSource(dataSourceName);
+ if (druidDataSource != null && druidDataSource.getSegment(segmentId)
!= null) {
+ Integer numReplicants = segmentsInCluster.get(segmentId, tier);
+ if (numReplicants == null) {
+ numReplicants = 0;
+ }
+ segmentsInCluster.put(segmentId, tier, numReplicants + 1);
+ }
+ }
+ }
+ for (DataSegment segment : segments.get()) {
+ for (final Rule rule : rules) {
+ if (!(rule instanceof LoadRule && rule.appliesTo(segment, now))) {
+ continue;
+ }
+ ((LoadRule) rule)
+ .getTieredReplicants()
+ .forEach((final String tier, final Integer ruleReplicants) -> {
+ Integer currentReplicantsRetVal =
segmentsInCluster.get(segment.getId(), tier);
+ int currentReplicants = currentReplicantsRetVal == null ? 0 :
currentReplicantsRetVal;
+ Object2LongMap<String> underReplicationPerDataSource =
underReplicationCountsPerDataSourcePerTier
+ .computeIfAbsent(tier, ignored -> new
Object2LongOpenHashMap<>());
+ ((Object2LongOpenHashMap<String>)
underReplicationPerDataSource)
+ .addTo(dataSourceName, Math.max(ruleReplicants -
currentReplicants, 0));
+ });
+ break; // only the first matching rule applies
+ }
+ }
+ return Response.ok(underReplicationCountsPerDataSourcePerTier).build();
+ } else {
+ // Calculate resposne for default mode
Review comment:
resposne -> response
##########
File path:
server/src/main/java/org/apache/druid/server/http/DataSourcesResource.java
##########
@@ -391,6 +396,123 @@ public Response getServedSegmentsInInterval(
return getServedSegmentsInInterval(dataSourceName, full != null,
theInterval::contains);
}
+ @GET
+ @Path("/{dataSourceName}/loadstatus")
+ @Produces(MediaType.APPLICATION_JSON)
+ @ResourceFilters(DatasourceResourceFilter.class)
+ public Response getDatasourceLoadstatus(
+ @PathParam("dataSourceName") String dataSourceName,
+ @QueryParam("interval") @Nullable final String interval,
+ @QueryParam("forceMetadataRefresh") @Nullable final Boolean
forceMetadataRefresh,
+ @QueryParam("simple") @Nullable final String simple,
+ @QueryParam("full") @Nullable final String full
+ )
+ {
+ final Interval theInterval;
+ if (interval == null) {
+ long defaultIntervalOffset = 14 * 24 * 60 * 60 * 1000;
+ long currentTimeInMs = System.currentTimeMillis();
+ theInterval = Intervals.utc(currentTimeInMs - defaultIntervalOffset,
currentTimeInMs);
+ } else {
+ theInterval = Intervals.of(interval.replace('_', '/'));
+ }
+
+ boolean requiresMetadataStorePoll = forceMetadataRefresh == null ? true :
forceMetadataRefresh;
+
+ Optional<Iterable<DataSegment>> segments =
segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
+ dataSourceName,
+ theInterval,
+ requiresMetadataStorePoll
+ );
+
+ if (!segments.isPresent()) {
+ return logAndCreateDataSourceNotFoundResponse(dataSourceName);
+ }
+
+ if (simple != null) {
+ // Calculate resposne for simple mode
Review comment:
resposne -> response
##########
File path:
server/src/main/java/org/apache/druid/server/http/DataSourcesResource.java
##########
@@ -391,6 +396,123 @@ public Response getServedSegmentsInInterval(
return getServedSegmentsInInterval(dataSourceName, full != null,
theInterval::contains);
}
+ @GET
+ @Path("/{dataSourceName}/loadstatus")
+ @Produces(MediaType.APPLICATION_JSON)
+ @ResourceFilters(DatasourceResourceFilter.class)
+ public Response getDatasourceLoadstatus(
+ @PathParam("dataSourceName") String dataSourceName,
+ @QueryParam("interval") @Nullable final String interval,
+ @QueryParam("forceMetadataRefresh") @Nullable final Boolean
forceMetadataRefresh,
+ @QueryParam("simple") @Nullable final String simple,
+ @QueryParam("full") @Nullable final String full
+ )
+ {
+ final Interval theInterval;
+ if (interval == null) {
+ long defaultIntervalOffset = 14 * 24 * 60 * 60 * 1000;
+ long currentTimeInMs = System.currentTimeMillis();
+ theInterval = Intervals.utc(currentTimeInMs - defaultIntervalOffset,
currentTimeInMs);
+ } else {
+ theInterval = Intervals.of(interval.replace('_', '/'));
+ }
+
+ boolean requiresMetadataStorePoll = forceMetadataRefresh == null ? true :
forceMetadataRefresh;
+
+ Optional<Iterable<DataSegment>> segments =
segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
+ dataSourceName,
+ theInterval,
+ requiresMetadataStorePoll
+ );
+
+ if (!segments.isPresent()) {
+ return logAndCreateDataSourceNotFoundResponse(dataSourceName);
+ }
+
+ if (simple != null) {
+ // Calculate resposne for simple mode
+ Map<SegmentId, SegmentLoadInfo> segmentLoadInfos =
serverInventoryView.getSegmentLoadInfos();
+ int numUnloadedSegments = 0;
+ for (DataSegment segment : segments.get()) {
+ if (!segmentLoadInfos.containsKey(segment.getId())) {
+ numUnloadedSegments++;
+ }
+ }
+ return Response.ok(
+ ImmutableMap.of(
+ dataSourceName,
+ numUnloadedSegments
+ )
+ ).build();
+ } else if (full != null) {
+ // Calculate resposne for full mode
+ final Map<String, Object2LongMap<String>>
underReplicationCountsPerDataSourcePerTier = new HashMap<>();
+ final List<Rule> rules =
metadataRuleManager.getRulesWithDefault(dataSourceName);
+ final Table<SegmentId, String, Integer> segmentsInCluster =
HashBasedTable.create();
+ final DateTime now = DateTimes.nowUtc();
+
+ for (DataSegment segment : segments.get()) {
+ for (DruidServer druidServer : serverInventoryView.getInventory()) {
+ String tier = druidServer.getTier();
+ SegmentId segmentId = segment.getId();
+ DruidDataSource druidDataSource =
druidServer.getDataSource(dataSourceName);
+ if (druidDataSource != null && druidDataSource.getSegment(segmentId)
!= null) {
+ Integer numReplicants = segmentsInCluster.get(segmentId, tier);
+ if (numReplicants == null) {
+ numReplicants = 0;
+ }
+ segmentsInCluster.put(segmentId, tier, numReplicants + 1);
+ }
+ }
+ }
+ for (DataSegment segment : segments.get()) {
Review comment:
Hmm, is there a way to re-arrange this without iterating the entire set
of segments twice? If not, it might be worth pushing this into
`DruidCoordinator`, at least when force refresh is true, since it potentially
has `segmentReplicantLookup` already built — or exposing that lookup to this
resource in some manner.
##########
File path:
server/src/main/java/org/apache/druid/server/http/DataSourcesResource.java
##########
@@ -391,6 +396,123 @@ public Response getServedSegmentsInInterval(
return getServedSegmentsInInterval(dataSourceName, full != null,
theInterval::contains);
}
+ @GET
+ @Path("/{dataSourceName}/loadstatus")
+ @Produces(MediaType.APPLICATION_JSON)
+ @ResourceFilters(DatasourceResourceFilter.class)
+ public Response getDatasourceLoadstatus(
+ @PathParam("dataSourceName") String dataSourceName,
+ @QueryParam("interval") @Nullable final String interval,
+ @QueryParam("forceMetadataRefresh") @Nullable final Boolean
forceMetadataRefresh,
+ @QueryParam("simple") @Nullable final String simple,
+ @QueryParam("full") @Nullable final String full
+ )
+ {
+ final Interval theInterval;
+ if (interval == null) {
+ long defaultIntervalOffset = 14 * 24 * 60 * 60 * 1000;
+ long currentTimeInMs = System.currentTimeMillis();
+ theInterval = Intervals.utc(currentTimeInMs - defaultIntervalOffset,
currentTimeInMs);
+ } else {
+ theInterval = Intervals.of(interval.replace('_', '/'));
+ }
+
+ boolean requiresMetadataStorePoll = forceMetadataRefresh == null ? true :
forceMetadataRefresh;
+
+ Optional<Iterable<DataSegment>> segments =
segmentsMetadataManager.iterateAllUsedNonOvershadowedSegmentsForDatasourceInterval(
+ dataSourceName,
+ theInterval,
+ requiresMetadataStorePoll
+ );
+
+ if (!segments.isPresent()) {
+ return logAndCreateDataSourceNotFoundResponse(dataSourceName);
+ }
+
+ if (simple != null) {
+ // Calculate resposne for simple mode
+ Map<SegmentId, SegmentLoadInfo> segmentLoadInfos =
serverInventoryView.getSegmentLoadInfos();
+ int numUnloadedSegments = 0;
+ for (DataSegment segment : segments.get()) {
+ if (!segmentLoadInfos.containsKey(segment.getId())) {
+ numUnloadedSegments++;
+ }
+ }
+ return Response.ok(
+ ImmutableMap.of(
+ dataSourceName,
+ numUnloadedSegments
+ )
+ ).build();
+ } else if (full != null) {
+ // Calculate resposne for full mode
+ final Map<String, Object2LongMap<String>>
underReplicationCountsPerDataSourcePerTier = new HashMap<>();
+ final List<Rule> rules =
metadataRuleManager.getRulesWithDefault(dataSourceName);
+ final Table<SegmentId, String, Integer> segmentsInCluster =
HashBasedTable.create();
+ final DateTime now = DateTimes.nowUtc();
+
+ for (DataSegment segment : segments.get()) {
+ for (DruidServer druidServer : serverInventoryView.getInventory()) {
+ String tier = druidServer.getTier();
+ SegmentId segmentId = segment.getId();
+ DruidDataSource druidDataSource =
druidServer.getDataSource(dataSourceName);
+ if (druidDataSource != null && druidDataSource.getSegment(segmentId)
!= null) {
+ Integer numReplicants = segmentsInCluster.get(segmentId, tier);
+ if (numReplicants == null) {
+ numReplicants = 0;
+ }
+ segmentsInCluster.put(segmentId, tier, numReplicants + 1);
+ }
+ }
+ }
+ for (DataSegment segment : segments.get()) {
+ for (final Rule rule : rules) {
+ if (!(rule instanceof LoadRule && rule.appliesTo(segment, now))) {
Review comment:
Yeah, I think you need to handle `BroadcastDistributionRule` here too if
you want to be totally complete; however, the `CoordinatorResource` loadstatus
API call has this problem too, so it would probably be OK to fix both in a
follow-up PR.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]