This is an automated email from the ASF dual-hosted git repository.
tuglu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new 860d107fba4 Emit per-query result-level caching metrics on Broker
(#18063)
860d107fba4 is described below
commit 860d107fba44ce90a51e78feceb4b5b7896c351f
Author: jtuglu-netflix <[email protected]>
AuthorDate: Mon Jun 30 09:55:06 2025 -0700
Emit per-query result-level caching metrics on Broker (#18063)
- Adds `query/resultCache/hit` metric which indicates whether the given
query hit the result cache (if it was polled).
---
docs/operations/metrics.md | 1 +
.../apache/druid/query/DefaultQueryMetrics.java | 7 +++
.../java/org/apache/druid/query/QueryMetrics.java | 5 ++
.../query/search/DefaultSearchQueryMetrics.java | 6 +++
.../druid/query/DefaultQueryMetricsTest.java | 6 ++-
.../druid/query/ResultLevelCachingQueryRunner.java | 19 ++++++--
.../druid/server/ClientQuerySegmentWalker.java | 3 +-
.../QueryRunnerBasedOnClusteredClientTestBase.java | 6 ++-
.../query/ResultLevelCachingQueryRunnerTest.java | 55 ++++++++++++++++++++--
9 files changed, 95 insertions(+), 13 deletions(-)
diff --git a/docs/operations/metrics.md b/docs/operations/metrics.md
index 4d8665fa6fa..8fc1cf34164 100644
--- a/docs/operations/metrics.md
+++ b/docs/operations/metrics.md
@@ -54,6 +54,7 @@ Most metric values reset each emission period, as specified
in `druid.monitoring
|`query/time`|Milliseconds taken to complete a query.|<p>Common: `dataSource`,
`type`, `interval`, `hasFilters`, `duration`, `context`, `remoteAddress`,
`id`.</p><p>Aggregation Queries: `numMetrics`,
`numComplexMetrics`.</p><p>GroupBy: `numDimensions`.</p><p> TopN: `threshold`,
`dimension`.</p>|< 1s|
|`query/bytes`|The total number of bytes returned to the requesting client in
the query response from the broker. Other services report the total bytes for
their portion of the query. |<p>Common: `dataSource`, `type`, `interval`,
`hasFilters`, `duration`, `context`, `remoteAddress`, `id`.</p><p> Aggregation
Queries: `numMetrics`, `numComplexMetrics`.</p><p> GroupBy:
`numDimensions`.</p><p> TopN: `threshold`, `dimension`.</p>| |
|`query/node/time`|Milliseconds taken to query individual historical/realtime
processes.|`id`, `status`, `server`|< 1s|
+|`query/resultCache/hit`|Whether the query hit the result cache (1) or not
(0). Emission of the metric indicates the result-level cache was
polled.|<p>Common: `dataSource`, `type`, `interval`, `hasFilters`, `duration`,
`context`, `remoteAddress`, `id`.</p>|Varies|
|`query/node/bytes`|Number of bytes returned from querying individual
historical/realtime processes.|`id`, `status`, `server`| |
|`query/node/ttfb`|Time to first byte. Milliseconds elapsed until Broker
starts receiving the response from individual historical/realtime
processes.|`id`, `status`, `server`|< 1s|
|`query/count`|Number of total queries.|This metric is only available if the
`QueryCountStatsMonitor` module is included.| |
diff --git
a/processing/src/main/java/org/apache/druid/query/DefaultQueryMetrics.java
b/processing/src/main/java/org/apache/druid/query/DefaultQueryMetrics.java
index 9f349484153..9f2ed5fa675 100644
--- a/processing/src/main/java/org/apache/druid/query/DefaultQueryMetrics.java
+++ b/processing/src/main/java/org/apache/druid/query/DefaultQueryMetrics.java
@@ -43,6 +43,7 @@ public class DefaultQueryMetrics<QueryType extends Query<?>>
implements QueryMet
public static final String QUERY_WAIT_TIME = "query/wait/time";
public static final String QUERY_SEGMENT_TIME = "query/segment/time";
public static final String QUERY_SEGMENT_AND_CACHE_TIME =
"query/segmentAndCache/time";
+ public static final String QUERY_RESULT_CACHE_HIT = "query/resultCache/hit";
protected final ServiceMetricEvent.Builder builder = new
ServiceMetricEvent.Builder();
protected final Map<String, Number> metrics = new HashMap<>();
@@ -254,6 +255,12 @@ public class DefaultQueryMetrics<QueryType extends
Query<?>> implements QueryMet
return reportMillisTimeMetric(QUERY_SEGMENT_AND_CACHE_TIME, timeNs);
}
+ @Override
+ public QueryMetrics<QueryType> reportResultCachePoll(boolean hit)
+ {
+ return reportMetric(QUERY_RESULT_CACHE_HIT, hit ? 1 : 0);
+ }
+
@Override
public QueryMetrics<QueryType> reportCpuTime(long timeNs)
{
diff --git a/processing/src/main/java/org/apache/druid/query/QueryMetrics.java
b/processing/src/main/java/org/apache/druid/query/QueryMetrics.java
index 812b1dd9134..284da48c42b 100644
--- a/processing/src/main/java/org/apache/druid/query/QueryMetrics.java
+++ b/processing/src/main/java/org/apache/druid/query/QueryMetrics.java
@@ -355,6 +355,11 @@ public interface QueryMetrics<QueryType extends Query<?>>
*/
QueryMetrics<QueryType> reportSegmentAndCacheTime(long timeNs);
+ /**
+   * Emitted only if the given query polled the result-level cache; reports
whether the poll was a hit.
+ */
+ QueryMetrics<QueryType> reportResultCachePoll(boolean hit);
+
/**
* Registers "cpu time" metric.
*/
diff --git
a/processing/src/main/java/org/apache/druid/query/search/DefaultSearchQueryMetrics.java
b/processing/src/main/java/org/apache/druid/query/search/DefaultSearchQueryMetrics.java
index 518611e408d..85696d69ef9 100644
---
a/processing/src/main/java/org/apache/druid/query/search/DefaultSearchQueryMetrics.java
+++
b/processing/src/main/java/org/apache/druid/query/search/DefaultSearchQueryMetrics.java
@@ -217,6 +217,12 @@ public class DefaultSearchQueryMetrics implements
SearchQueryMetrics
return delegateQueryMetrics.reportSegmentAndCacheTime(timeNs);
}
+ @Override
+ public QueryMetrics reportResultCachePoll(boolean hit)
+ {
+ return delegateQueryMetrics.reportResultCachePoll(hit);
+ }
+
@Override
public QueryMetrics reportCpuTime(long timeNs)
{
diff --git
a/processing/src/test/java/org/apache/druid/query/DefaultQueryMetricsTest.java
b/processing/src/test/java/org/apache/druid/query/DefaultQueryMetricsTest.java
index f2fbdc1eb98..ce7481c4e4a 100644
---
a/processing/src/test/java/org/apache/druid/query/DefaultQueryMetricsTest.java
+++
b/processing/src/test/java/org/apache/druid/query/DefaultQueryMetricsTest.java
@@ -131,12 +131,14 @@ public class DefaultQueryMetricsTest extends
InitializedNullHandlingTest
queryMetrics.reportNodeBytes(10).emit(serviceEmitter);
serviceEmitter.verifyValue("query/node/bytes", 10L);
- Assert.assertEquals(9, serviceEmitter.getEvents().size());
+
+ queryMetrics.reportResultCachePoll(true).emit(serviceEmitter);
+ serviceEmitter.verifyValue("query/resultCache/hit", 1);
// Verify that Queried Segment Count does not get emitted by the
DefaultQueryMetrics
// and the total number of emitted metrics remains unchanged
queryMetrics.reportQueriedSegmentCount(25).emit(serviceEmitter);
- Assert.assertEquals(9, serviceEmitter.getEvents().size());
+ Assert.assertEquals(10, serviceEmitter.getEvents().size());
}
@Test
diff --git
a/server/src/main/java/org/apache/druid/query/ResultLevelCachingQueryRunner.java
b/server/src/main/java/org/apache/druid/query/ResultLevelCachingQueryRunner.java
index 872590b0643..5d029af8b51 100644
---
a/server/src/main/java/org/apache/druid/query/ResultLevelCachingQueryRunner.java
+++
b/server/src/main/java/org/apache/druid/query/ResultLevelCachingQueryRunner.java
@@ -38,6 +38,7 @@ import
org.apache.druid.java.util.common.guava.SequenceWrapper;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.java.util.common.jackson.JacksonUtils;
import org.apache.druid.java.util.common.logger.Logger;
+import org.apache.druid.java.util.emitter.service.ServiceEmitter;
import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.server.QueryResource;
@@ -58,6 +59,8 @@ public class ResultLevelCachingQueryRunner<T> implements
QueryRunner<T>
private final boolean populateResultCache;
private Query<T> query;
private final CacheStrategy<T, Object, Query<T>> strategy;
+ private final QueryToolChest queryToolChest;
+ private final ServiceEmitter emitter;
public ResultLevelCachingQueryRunner(
@@ -66,7 +69,8 @@ public class ResultLevelCachingQueryRunner<T> implements
QueryRunner<T>
Query<T> query,
ObjectMapper objectMapper,
Cache cache,
- CacheConfig cacheConfig
+ CacheConfig cacheConfig,
+ ServiceEmitter emitter
)
{
this.baseRunner = baseRunner;
@@ -82,6 +86,8 @@ public class ResultLevelCachingQueryRunner<T> implements
QueryRunner<T>
CacheUtil.ServerType.BROKER
);
this.useResultCache = CacheUtil.isUseResultCache(query, strategy,
cacheConfig, CacheUtil.ServerType.BROKER);
+ this.queryToolChest = queryToolChest;
+ this.emitter = emitter;
}
@Override
@@ -104,8 +110,15 @@ public class ResultLevelCachingQueryRunner<T> implements
QueryRunner<T>
);
String newResultSetId = responseContext.getEntityTag();
- if (useResultCache && newResultSetId != null &&
newResultSetId.equals(existingResultSetId)) {
- log.debug("Return cached result set as there is no change in
identifiers for query %s ", query.getId());
+ final boolean cacheHit = newResultSetId != null &&
newResultSetId.equals(existingResultSetId);
+ if (useResultCache) {
+ final QueryMetrics<?> queryMetrics =
queryPlus.withQueryMetrics(queryToolChest).getQueryMetrics();
+ queryMetrics.reportResultCachePoll(cacheHit);
+ queryMetrics.emit(emitter);
+ }
+
+ if (useResultCache && cacheHit) {
+ log.debug("Return cached result set as there is no change in
identifiers for query[%s].", query.getId());
// Call accumulate on the sequence to ensure that all
Wrapper/Closer/Baggage/etc. get called
resultFromClient.accumulate(null, (accumulated, in) -> accumulated);
return deserializeResults(cachedResultSet, strategy,
existingResultSetId);
diff --git
a/server/src/main/java/org/apache/druid/server/ClientQuerySegmentWalker.java
b/server/src/main/java/org/apache/druid/server/ClientQuerySegmentWalker.java
index 5e02a550dd0..fe9b052de9b 100644
--- a/server/src/main/java/org/apache/druid/server/ClientQuerySegmentWalker.java
+++ b/server/src/main/java/org/apache/druid/server/ClientQuerySegmentWalker.java
@@ -601,7 +601,8 @@ public class ClientQuerySegmentWalker implements
QuerySegmentWalker
query,
objectMapper,
cache,
- cacheConfig
+ cacheConfig,
+ emitter
)
);
}
diff --git
a/server/src/test/java/org/apache/druid/query/QueryRunnerBasedOnClusteredClientTestBase.java
b/server/src/test/java/org/apache/druid/query/QueryRunnerBasedOnClusteredClientTestBase.java
index 76a63442e3e..fdd7ca66a45 100644
---
a/server/src/test/java/org/apache/druid/query/QueryRunnerBasedOnClusteredClientTestBase.java
+++
b/server/src/test/java/org/apache/druid/query/QueryRunnerBasedOnClusteredClientTestBase.java
@@ -40,6 +40,7 @@ import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.io.Closer;
+import org.apache.druid.java.util.metrics.StubServiceEmitter;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.context.ConcurrentResponseContext;
import org.apache.druid.query.context.ResponseContext;
@@ -50,7 +51,6 @@ import
org.apache.druid.segment.generator.GeneratorBasicSchemas;
import org.apache.druid.segment.generator.GeneratorSchemaInfo;
import org.apache.druid.segment.generator.SegmentGenerator;
import org.apache.druid.server.QueryStackTests;
-import org.apache.druid.server.metrics.NoopServiceEmitter;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.NumberedShardSpec;
import org.joda.time.Interval;
@@ -100,6 +100,7 @@ public abstract class
QueryRunnerBasedOnClusteredClientTestBase
protected List<DruidServer> servers;
private SegmentGenerator segmentGenerator;
+ protected StubServiceEmitter emitter = new StubServiceEmitter();
protected QueryRunnerBasedOnClusteredClientTestBase()
{
@@ -121,6 +122,7 @@ public abstract class
QueryRunnerBasedOnClusteredClientTestBase
segmentGenerator = new SegmentGenerator();
httpClient = new TestHttpClient(objectMapper);
simpleServerView = new SimpleServerView(conglomerate, objectMapper,
httpClient);
+ emitter.flush();
cachingClusteredClient = new CachingClusteredClient(
conglomerate,
simpleServerView,
@@ -132,7 +134,7 @@ public abstract class
QueryRunnerBasedOnClusteredClientTestBase
QueryStackTests.getParallelMergeConfig(USE_PARALLEL_MERGE_POOL_CONFIGURED),
ForkJoinPool.commonPool(),
QueryStackTests.DEFAULT_NOOP_SCHEDULER,
- new NoopServiceEmitter()
+ emitter
);
servers = new ArrayList<>();
}
diff --git
a/server/src/test/java/org/apache/druid/query/ResultLevelCachingQueryRunnerTest.java
b/server/src/test/java/org/apache/druid/query/ResultLevelCachingQueryRunnerTest.java
index aef6ed503ad..37f5a293b90 100644
---
a/server/src/test/java/org/apache/druid/query/ResultLevelCachingQueryRunnerTest.java
+++
b/server/src/test/java/org/apache/druid/query/ResultLevelCachingQueryRunnerTest.java
@@ -97,6 +97,8 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(0, cache.getStats().getNumHits());
Assert.assertEquals(0, cache.getStats().getNumEntries());
Assert.assertEquals(0, cache.getStats().getNumMisses());
+
+ emitter.verifyNotEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT);
}
@Test
@@ -118,6 +120,9 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(1, cache.getStats().getNumEntries());
Assert.assertEquals(0, cache.getStats().getNumMisses());
+ emitter.verifyNotEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT);
+ emitter.flush();
+
final ResultLevelCachingQueryRunner<Result<TimeseriesResultValue>>
queryRunner2 = createQueryRunner(
newCacheConfig(true, false, DEFAULT_CACHE_ENTRY_MAX_SIZE),
query
@@ -132,6 +137,8 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(0, cache.getStats().getNumHits());
Assert.assertEquals(1, cache.getStats().getNumEntries());
Assert.assertEquals(0, cache.getStats().getNumMisses());
+
+ emitter.verifyNotEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT);
}
@Test
@@ -153,6 +160,9 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(0, cache.getStats().getNumEntries());
Assert.assertEquals(0, cache.getStats().getNumMisses());
+ emitter.verifyNotEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT);
+ emitter.flush();
+
final ResultLevelCachingQueryRunner<Result<TimeseriesResultValue>>
queryRunner2 = createQueryRunner(
newCacheConfig(false, true, DEFAULT_CACHE_ENTRY_MAX_SIZE),
query
@@ -167,6 +177,9 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(0, cache.getStats().getNumHits());
Assert.assertEquals(0, cache.getStats().getNumEntries());
Assert.assertEquals(1, cache.getStats().getNumMisses());
+
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 0);
}
@Test
@@ -188,6 +201,10 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(1, cache.getStats().getNumEntries());
Assert.assertEquals(1, cache.getStats().getNumMisses());
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 0);
+ emitter.flush();
+
final ResultLevelCachingQueryRunner<Result<TimeseriesResultValue>>
queryRunner2 = createQueryRunner(
newCacheConfig(true, true, DEFAULT_CACHE_ENTRY_MAX_SIZE),
query
@@ -202,6 +219,9 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(1, cache.getStats().getNumHits());
Assert.assertEquals(1, cache.getStats().getNumEntries());
Assert.assertEquals(1, cache.getStats().getNumMisses());
+
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
}
@Test
@@ -223,6 +243,10 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(0, cache.getStats().getNumEntries());
Assert.assertEquals(1, cache.getStats().getNumMisses());
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 0);
+ emitter.flush();
+
final ResultLevelCachingQueryRunner<Result<TimeseriesResultValue>>
queryRunner2 = createQueryRunner(
newCacheConfig(true, true, DEFAULT_CACHE_ENTRY_MAX_SIZE),
query
@@ -237,6 +261,9 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(0, cache.getStats().getNumHits());
Assert.assertEquals(1, cache.getStats().getNumEntries());
Assert.assertEquals(2, cache.getStats().getNumMisses());
+
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 0);
}
@Test
@@ -272,6 +299,8 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(0, cache.getStats().getNumHits());
Assert.assertEquals(0, cache.getStats().getNumEntries());
Assert.assertEquals(0, cache.getStats().getNumMisses());
+
+ emitter.verifyNotEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT);
}
}
@@ -290,13 +319,13 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
objectMapper
));
Mockito.doAnswer((Answer<Object>) invocation -> {
- List<ReferenceCountingResourceHolder<ByteBuffer>> resoruce =
mergePool.takeBatch(1, 1);
- if (resoruce.isEmpty()) {
+ List<ReferenceCountingResourceHolder<ByteBuffer>> resource =
mergePool.takeBatch(1, 1);
+ if (resource.isEmpty()) {
fail("Resource should not be empty");
}
Sequence<Result<TimeseriesResultValue>> realSequence =
(Sequence<Result<TimeseriesResultValue>>) invocation.callRealMethod();
Closer closer = Closer.create();
- closer.register(() ->
resoruce.forEach(ReferenceCountingResourceHolder::close));
+ closer.register(() ->
resource.forEach(ReferenceCountingResourceHolder::close));
return Sequences.withBaggage(realSequence, closer);
}).when(spyRunner).run(ArgumentMatchers.any(), ArgumentMatchers.any());
@@ -306,7 +335,8 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
query,
objectMapper,
cache,
- cacheConfig
+ cacheConfig,
+ emitter
);
final Sequence<Result<TimeseriesResultValue>> sequence1 = queryRunner1.run(
@@ -318,6 +348,10 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(1, cache.getStats().getNumEntries());
Assert.assertEquals(1, cache.getStats().getNumMisses());
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 0);
+ emitter.flush();
+
final Sequence<Result<TimeseriesResultValue>> sequence2 = queryRunner1.run(
QueryPlus.wrap(query),
@@ -329,6 +363,10 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(1, cache.getStats().getNumEntries());
Assert.assertEquals(1, cache.getStats().getNumMisses());
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.flush();
+
final Sequence<Result<TimeseriesResultValue>> sequence3 = queryRunner1.run(
QueryPlus.wrap(query),
responseContext()
@@ -338,6 +376,9 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(2, cache.getStats().getNumHits());
Assert.assertEquals(1, cache.getStats().getNumEntries());
Assert.assertEquals(1, cache.getStats().getNumMisses());
+
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
}
@Test
@@ -360,6 +401,9 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
Assert.assertEquals(0, cache.getStats().getNumHits());
Assert.assertEquals(0, cache.getStats().getNumEntries());
Assert.assertEquals(1, cache.getStats().getNumMisses());
+
+ emitter.verifyEmitted(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 1);
+ emitter.verifyValue(DefaultQueryMetrics.QUERY_RESULT_CACHE_HIT, 0);
}
private <T> ResultLevelCachingQueryRunner<T> createQueryRunner(
@@ -379,7 +423,8 @@ public class ResultLevelCachingQueryRunnerTest extends
QueryRunnerBasedOnCluster
query,
objectMapper,
cache,
- cacheConfig
+ cacheConfig,
+ emitter
);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]