This is an automated email from the ASF dual-hosted git repository.
oleewere pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ambari-logsearch.git
The following commit(s) were added to refs/heads/master by this push:
new a739360 AMBARI-24833. Cloud base path can be URI as well
a739360 is described below
commit a739360576790a52f8f67969a3ce97f08874d13e
Author: Oliver Szabo <[email protected]>
AuthorDate: Fri Dec 7 01:08:32 2018 +0100
AMBARI-24833. Cloud base path can be URI as well
---
.../ambari/logfeeder/conf/LogFeederProps.java | 6 +-
.../output/cloud/CloudStorageUploader.java | 22 +-
.../output/cloud/upload/HDFSUploadClient.java | 9 +
.../ambari/logfeeder/util/LogFeederHDFSUtil.java | 4 +-
.../src/main/resources/core-site.xml | 2 +-
.../output/cloud/CloudStorageUploaderTest.java | 82 +++
docs/api-docs/logsearch-swagger.yaml | 694 ++++++++++-----------
docs/logfeeder_properties.md | 2 +-
8 files changed, 465 insertions(+), 356 deletions(-)
diff --git a/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/conf/LogFeederProps.java b/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/conf/LogFeederProps.java
index bafb321..41445cd 100644
--- a/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/conf/LogFeederProps.java
+++ b/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/conf/LogFeederProps.java
@@ -311,12 +311,12 @@ public class LogFeederProps implements LogFeederProperties {
@LogSearchPropertyDescription(
name = LogFeederConstants.CLOUD_STORAGE_BASE_PATH,
- description = "Base path prefix for storing logs (cloud storage / hdfs)",
- examples = {"/user/logsearch/mypath"},
+ description = "Base path prefix for storing logs (cloud storage / hdfs),
could be an absolute path or URI. (if URI used, that will override the
default.FS with HDFS client)",
+ examples = {"/user/logsearch/mypath", "s3a:///user/logsearch"},
defaultValue = "/apps/logsearch",
sources = {LogFeederConstants.LOGFEEDER_PROPERTIES_FILE}
)
- @Value("${" + LogFeederConstants.CLOUD_STORAGE_BASE_PATH + ":}")
+ @Value("${" + LogFeederConstants.CLOUD_STORAGE_BASE_PATH +
":/apps/logsearch}")
private String cloudBasePath;
@LogSearchPropertyDescription(
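For illustration only (hypothetical values, not part of the patch): with this change logfeeder.cloud.storage.base.path accepts either a plain path or a full URI in the Log Feeder properties file, and a URI value is additionally applied as fs.defaultFS by the HDFS upload client (see HDFSUploadClient below). Either of the following forms would be valid:

# plain path, resolved against the file system configured in core-site.xml
logfeeder.cloud.storage.base.path=/apps/logsearch

# URI form (hypothetical bucket name); the scheme and authority also become fs.defaultFS
logfeeder.cloud.storage.base.path=s3a://mybucket/apps/logsearch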
diff --git a/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/cloud/CloudStorageUploader.java b/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/cloud/CloudStorageUploader.java
index cb4cac3..6a80cab 100644
--- a/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/cloud/CloudStorageUploader.java
+++ b/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/cloud/CloudStorageUploader.java
@@ -18,10 +18,12 @@
*/
package org.apache.ambari.logfeeder.output.cloud;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.ambari.logfeeder.conf.LogFeederProps;
import org.apache.ambari.logfeeder.output.cloud.upload.UploadClient;
import org.apache.ambari.logfeeder.util.LogFeederUtil;
import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -90,9 +92,7 @@ public class CloudStorageUploader extends Thread {
logger.debug("Not found any files to upload.");
} else {
for (File file : filesToUpload) {
- String basePath = logFeederProps.getCloudBasePath();
- String outputPath = String.format("%s/%s/%s/%s/%s", basePath, clusterName, hostName, file.getParentFile().getName(), file.getName())
- .replaceAll("//", "/");
+ final String outputPath = generateOutputPath(logFeederProps.getCloudBasePath(), clusterName, hostName, file);
logger.info("Upload will start: input: {}, output: {}",
file.getAbsolutePath(), outputPath);
Future<?> future = executorService.submit(() -> {
try {
@@ -114,4 +114,20 @@ public class CloudStorageUploader extends Thread {
}
}
+ @VisibleForTesting
+ String generateOutputPath(String basePath, String clusterName, String hostName, File localFile) {
+ final String outputWithoutBasePath = Paths.get(clusterName, hostName, localFile.getParentFile().getName(), localFile.getName()).toString();
+ final String outputPath;
+ if (StringUtils.isNotEmpty(basePath)) {
+ if (!basePath.endsWith("/")){
+ outputPath = basePath + "/" + outputWithoutBasePath;
+ } else {
+ outputPath = basePath + outputWithoutBasePath;
+ }
+ } else {
+ outputPath = outputWithoutBasePath;
+ }
+ return outputPath;
+ }
+
}
diff --git a/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/cloud/upload/HDFSUploadClient.java b/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/cloud/upload/HDFSUploadClient.java
index 3d5ec8f..e072277 100644
--- a/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/cloud/upload/HDFSUploadClient.java
+++ b/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/cloud/upload/HDFSUploadClient.java
@@ -64,7 +64,12 @@ public class HDFSUploadClient implements UploadClient {
configuration = new Configuration();
logger.info("Initialize HDFS client (cloud mode), using core-site.xml
from the classpath.");
}
+ if (hasScheme(logFeederProps.getCloudBasePath())) {
+ logger.info("Use cloud base path ({}) as fs.defaultFS",
logFeederProps.getCloudBasePath());
+ configuration.set(FS_DEFAULT_FS, logFeederProps.getCloudBasePath());
+ }
if (StringUtils.isNotBlank(logFeederProps.getCustomFs())) {
+ logger.info("Override fs.defaultFS with {}",
logFeederProps.getCustomFs());
configuration.set(FS_DEFAULT_FS, logFeederProps.getCustomFs());
}
if (hdfsOutputConfig.isHdfsKerberos()) {
@@ -90,6 +95,10 @@ public class HDFSUploadClient implements UploadClient {
LogFeederHDFSUtil.overrideFileSystemConfigs(logFeederProps, configurationRef.get());
}
+ private boolean hasScheme(String path) {
+ return StringUtils.isNotBlank(path) && path.split(":/").length > 1;
+ }
+
@Override
public void upload(String source, String target) throws Exception {
final FileSystem fs = LogFeederHDFSUtil.buildFileSystem(configurationRef.get());
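As a minimal, self-contained sketch (hypothetical class and method names, not the actual HDFSUploadClient code), the fs.defaultFS precedence introduced above can be read as: start from core-site.xml on the classpath, let a scheme-qualified cloud base path override it, and let logfeeder.cloud.storage.custom.fs win last when it is set.

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;

class FsDefaultFsPrecedenceSketch {

  // Mirrors the order in the patch: classpath core-site.xml, then a scheme-qualified
  // cloud base path, then the explicit custom fs override.
  static Configuration resolve(String cloudBasePath, String customFs) {
    Configuration conf = new Configuration();      // loads core-site.xml from the classpath
    if (StringUtils.isNotBlank(cloudBasePath) && cloudBasePath.split(":/").length > 1) {
      conf.set("fs.defaultFS", cloudBasePath);     // e.g. s3a://mybucket/apps/logsearch
    }
    if (StringUtils.isNotBlank(customFs)) {
      conf.set("fs.defaultFS", customFs);          // logfeeder.cloud.storage.custom.fs wins last
    }
    return conf;
  }

  public static void main(String[] args) {
    // Prints s3a://mybucket/apps/logsearch because no custom fs is given.
    System.out.println(resolve("s3a://mybucket/apps/logsearch", null).get("fs.defaultFS"));
  }
}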
diff --git a/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederHDFSUtil.java b/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederHDFSUtil.java
index b741229..c489fbb 100644
--- a/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederHDFSUtil.java
+++ b/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederHDFSUtil.java
@@ -19,6 +19,7 @@
package org.apache.ambari.logfeeder.util;
import java.io.IOException;
+import java.nio.file.Paths;
import java.util.Map;
import java.util.Properties;
@@ -39,9 +40,10 @@ public class LogFeederHDFSUtil {
public static void copyFromLocal(String sourceFilepath, String destFilePath, FileSystem fileSystem, boolean overwrite,
boolean delSrc, FsPermission fsPermission) throws Exception {
+ String fsUri = fileSystem.getUri().toString();
Path src = new Path(sourceFilepath);
Path dst = new Path(destFilePath);
- logger.info("copying localfile := " + sourceFilepath + " to hdfsPath := "
+ destFilePath);
+ logger.info("Copying localfile '{}' to hdfsPath (FS base URI: {}) '{}'",
sourceFilepath, fsUri, destFilePath);
fileSystem.copyFromLocalFile(delSrc, overwrite, src, dst);
if (fsPermission != null) {
fileSystem.setPermission(dst, fsPermission);
diff --git a/ambari-logsearch-logfeeder/src/main/resources/core-site.xml b/ambari-logsearch-logfeeder/src/main/resources/core-site.xml
index 9ec1d04..cc1e4a6 100644
--- a/ambari-logsearch-logfeeder/src/main/resources/core-site.xml
+++ b/ambari-logsearch-logfeeder/src/main/resources/core-site.xml
@@ -17,7 +17,7 @@
<configuration>
<property>
<name>fs.defaultFS</name>
- <value>s3a://logfeeder</value>
+ <value>s3a://logfeeder/apps</value>
</property>
<property>
<name>fs.s3a.endpoint</name>
diff --git a/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/cloud/CloudStorageUploaderTest.java b/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/cloud/CloudStorageUploaderTest.java
new file mode 100644
index 0000000..f4a2767
--- /dev/null
+++ b/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/cloud/CloudStorageUploaderTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logfeeder.output.cloud;
+
+import org.apache.ambari.logfeeder.conf.CloudStorageDestination;
+import org.apache.ambari.logfeeder.conf.LogFeederProps;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+
+public class CloudStorageUploaderTest {
+
+ private static final String CLUSTER_NAME = "cl";
+ private static final String HOSTNAME = "hostname";
+
+ private CloudStorageUploader underTest;
+
+ @Before
+ public void setUp() {
+ LogFeederProps logFeederProps = new LogFeederProps();
+
+ logFeederProps.setCloudStorageDestination(CloudStorageDestination.DEFAULT_FS);
+ underTest = new CloudStorageUploader("name", null, logFeederProps);
+ }
+
+ @Test
+ public void testGenerateOutputPath() {
+ // GIVEN
+ String basePath = "example";
+ // WHEN
+ String output = underTest.generateOutputPath(basePath, CLUSTER_NAME, HOSTNAME, new File("/my/path"));
+ // THEN
+ Assert.assertEquals("example/cl/hostname/my/path", output);
+ }
+
+ @Test
+ public void testGenerateOutputPathWithEmptyBasePath() {
+ // GIVEN
+ String basePath = "";
+ // WHEN
+ String output = underTest.generateOutputPath(basePath, CLUSTER_NAME, HOSTNAME, new File("/my/path"));
+ // THEN
+ Assert.assertEquals("cl/hostname/my/path", output);
+ }
+
+ @Test
+ public void testGenerateOutputPathWithSlashEndAndStart() {
+ // GIVEN
+ String basePath = "example/";
+ // WHEN
+ String output = underTest.generateOutputPath(basePath, CLUSTER_NAME, HOSTNAME, new File("/my/path"));
+ // THEN
+ Assert.assertEquals("example/cl/hostname/my/path", output);
+ }
+
+ @Test
+ public void testGenerateOutputPathWithScheme() {
+ // GIVEN
+ String basePath = "s3a://bucket/example";
+ // WHEN
+ String output = underTest.generateOutputPath(basePath, CLUSTER_NAME, HOSTNAME, new File("/my/path"));
+ // THEN
+ Assert.assertEquals("s3a://bucket/example/cl/hostname/my/path", output);
+ }
+}
diff --git a/docs/api-docs/logsearch-swagger.yaml b/docs/api-docs/logsearch-swagger.yaml
index 4ae364a..d80182b 100644
--- a/docs/api-docs/logsearch-swagger.yaml
+++ b/docs/api-docs/logsearch-swagger.yaml
@@ -19,172 +19,6 @@ schemes:
- "http"
- "https"
paths:
- /audit/logs/serviceload:
- get:
- tags:
- - "auditlogs"
- summary: "The graph for showing the top users accessing the services"
- description: ""
- operationId: "getServiceLoadGet"
- produces:
- - "application/json"
- parameters:
- - name: "startIndex"
- in: "query"
- description: "Start index of the queried result"
- required: false
- type: "string"
- - name: "page"
- in: "query"
- description: "Number of pages for the results"
- required: false
- type: "string"
- default: "0"
- - name: "pageSize"
- in: "query"
- description: "Page size of the results"
- required: false
- type: "string"
- default: "1000"
- - name: "sortBy"
- in: "query"
- description: "Sorting the results based on this field"
- required: false
- type: "string"
- - name: "sortType"
- in: "query"
- description: "Type of sorting (osc, desc)"
- required: false
- type: "string"
- - name: "start_time"
- in: "query"
- description: "Date range param which is suportted from browser url"
- required: false
- type: "string"
- - name: "end_time"
- in: "query"
- description: "Date range param which is supported from browser url"
- required: false
- type: "string"
- - name: "clusters"
- in: "query"
- description: "filter for clusters (comma separated list)"
- required: false
- type: "string"
- - name: "includeMessage"
- in: "query"
- description: "Include query which will query against message column"
- required: false
- type: "string"
- - name: "excludeMessage"
- in: "query"
- description: "Exclude query which will query against message column"
- required: false
- type: "string"
- - name: "mustBe"
- in: "query"
- description: "Include the components, comma separated values"
- required: false
- type: "string"
- - name: "mustNot"
- in: "query"
- description: "Exclude the components, comma separated values"
- required: false
- type: "string"
- - name: "includeQuery"
- in: "query"
- description: "Include the values in query result e.g.:
[{message:*exception*}]"
- required: false
- type: "string"
- - name: "excludeQuery"
- in: "query"
- description: "Exclude the values in query result e.g.:
[{message:*timeout*}]"
- required: false
- type: "string"
- - name: "from"
- in: "query"
- description: "Date range param, start date"
- required: false
- type: "string"
- - name: "to"
- in: "query"
- description: "Date range param, end date"
- required: false
- type: "string"
- - name: "userList"
- in: "query"
- description: "Filter for users (comma separated list)"
- required: false
- type: "string"
- responses:
- 200:
- description: "successful operation"
- schema:
- $ref: "#/definitions/BarGraphDataListResponse"
- security:
- - basicAuth: []
- post:
- tags:
- - "auditlogs"
- summary: "The graph for showing the top users accessing the services"
- description: ""
- operationId: "getServiceLoadPost"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- parameters:
- - in: "body"
- name: "body"
- required: false
- schema:
- $ref: "#/definitions/AuditServiceLoadBodyRequest"
- responses:
- 200:
- description: "successful operation"
- schema:
- $ref: "#/definitions/BarGraphDataListResponse"
- security:
- - basicAuth: []
- /audit/logs/clusters:
- get:
- tags:
- - "auditlogs"
- summary: "Get all of the clusters for audit logs"
- description: ""
- operationId: "getClustersForAuditLogGet"
- produces:
- - "application/json"
- parameters: []
- responses:
- 200:
- description: "successful operation"
- schema:
- type: "array"
- items:
- type: "string"
- security:
- - basicAuth: []
- post:
- tags:
- - "auditlogs"
- summary: "Get all of the clusters for audit logs"
- description: ""
- operationId: "getClustersForAuditLogPost"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- parameters: []
- responses:
- 200:
- description: "successful operation"
- schema:
- type: "array"
- items:
- type: "string"
- security:
- - basicAuth: []
/audit/logs/schema/fields:
get:
tags:
@@ -826,6 +660,172 @@ paths:
description: "successful operation"
security:
- basicAuth: []
+ /audit/logs/serviceload:
+ get:
+ tags:
+ - "auditlogs"
+ summary: "The graph for showing the top users accessing the services"
+ description: ""
+ operationId: "getServiceLoadGet"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "startIndex"
+ in: "query"
+ description: "Start index of the queried result"
+ required: false
+ type: "string"
+ - name: "page"
+ in: "query"
+ description: "Number of pages for the results"
+ required: false
+ type: "string"
+ default: "0"
+ - name: "pageSize"
+ in: "query"
+ description: "Page size of the results"
+ required: false
+ type: "string"
+ default: "1000"
+ - name: "sortBy"
+ in: "query"
+ description: "Sorting the results based on this field"
+ required: false
+ type: "string"
+ - name: "sortType"
+ in: "query"
+ description: "Type of sorting (osc, desc)"
+ required: false
+ type: "string"
+ - name: "start_time"
+ in: "query"
+ description: "Date range param which is suportted from browser url"
+ required: false
+ type: "string"
+ - name: "end_time"
+ in: "query"
+ description: "Date range param which is supported from browser url"
+ required: false
+ type: "string"
+ - name: "clusters"
+ in: "query"
+ description: "filter for clusters (comma separated list)"
+ required: false
+ type: "string"
+ - name: "includeMessage"
+ in: "query"
+ description: "Include query which will query against message column"
+ required: false
+ type: "string"
+ - name: "excludeMessage"
+ in: "query"
+ description: "Exclude query which will query against message column"
+ required: false
+ type: "string"
+ - name: "mustBe"
+ in: "query"
+ description: "Include the components, comma separated values"
+ required: false
+ type: "string"
+ - name: "mustNot"
+ in: "query"
+ description: "Exclude the components, comma separated values"
+ required: false
+ type: "string"
+ - name: "includeQuery"
+ in: "query"
+ description: "Include the values in query result e.g.:
[{message:*exception*}]"
+ required: false
+ type: "string"
+ - name: "excludeQuery"
+ in: "query"
+ description: "Exclude the values in query result e.g.:
[{message:*timeout*}]"
+ required: false
+ type: "string"
+ - name: "from"
+ in: "query"
+ description: "Date range param, start date"
+ required: false
+ type: "string"
+ - name: "to"
+ in: "query"
+ description: "Date range param, end date"
+ required: false
+ type: "string"
+ - name: "userList"
+ in: "query"
+ description: "Filter for users (comma separated list)"
+ required: false
+ type: "string"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/BarGraphDataListResponse"
+ security:
+ - basicAuth: []
+ post:
+ tags:
+ - "auditlogs"
+ summary: "The graph for showing the top users accessing the services"
+ description: ""
+ operationId: "getServiceLoadPost"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ parameters:
+ - in: "body"
+ name: "body"
+ required: false
+ schema:
+ $ref: "#/definitions/AuditServiceLoadBodyRequest"
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ $ref: "#/definitions/BarGraphDataListResponse"
+ security:
+ - basicAuth: []
+ /audit/logs/clusters:
+ get:
+ tags:
+ - "auditlogs"
+ summary: "Get all of the clusters for audit logs"
+ description: ""
+ operationId: "getClustersForAuditLogGet"
+ produces:
+ - "application/json"
+ parameters: []
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ type: "array"
+ items:
+ type: "string"
+ security:
+ - basicAuth: []
+ post:
+ tags:
+ - "auditlogs"
+ summary: "Get all of the clusters for audit logs"
+ description: ""
+ operationId: "getClustersForAuditLogPost"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ parameters: []
+ responses:
+ 200:
+ description: "successful operation"
+ schema:
+ type: "array"
+ items:
+ type: "string"
+ security:
+ - basicAuth: []
/info/features:
get:
tags:
@@ -877,13 +877,13 @@ paths:
type: "object"
additionalProperties:
type: "boolean"
- /metadata:
+ /metadata/list:
get:
tags:
- "metadata"
- summary: "Get metadata"
+ summary: "Get metadata list"
description: ""
- operationId: "getMetadata"
+ operationId: "getMetadataList"
produces:
- "application/json"
parameters:
@@ -911,15 +911,17 @@ paths:
200:
description: "successful operation"
schema:
- $ref: "#/definitions/LogsearchMetaData"
+ type: "array"
+ items:
+ $ref: "#/definitions/LogsearchMetaData"
security:
- basicAuth: []
post:
tags:
- "metadata"
- summary: "Save metadata"
+ summary: "Save metadata list"
description: ""
- operationId: "saveMetadata"
+ operationId: "saveMetadataList"
produces:
- "application/json"
parameters:
@@ -927,7 +929,9 @@ paths:
name: "body"
required: false
schema:
- $ref: "#/definitions/LogsearchMetaData"
+ type: "array"
+ items:
+ $ref: "#/definitions/LogsearchMetaData"
responses:
200:
description: "successful operation"
@@ -938,27 +942,29 @@ paths:
delete:
tags:
- "metadata"
- summary: "Delete metadata"
+ summary: "Delete metadata list"
description: ""
- operationId: "deleteMetadata"
+ operationId: "deleteMetadataList"
parameters:
- in: "body"
name: "body"
required: false
schema:
- $ref: "#/definitions/LogsearchMetaData"
+ type: "array"
+ items:
+ $ref: "#/definitions/LogsearchMetaData"
responses:
default:
description: "successful operation"
security:
- basicAuth: []
- /metadata/list:
+ /metadata:
get:
tags:
- "metadata"
- summary: "Get metadata list"
+ summary: "Get metadata"
description: ""
- operationId: "getMetadataList"
+ operationId: "getMetadata"
produces:
- "application/json"
parameters:
@@ -986,17 +992,15 @@ paths:
200:
description: "successful operation"
schema:
- type: "array"
- items:
- $ref: "#/definitions/LogsearchMetaData"
+ $ref: "#/definitions/LogsearchMetaData"
security:
- basicAuth: []
post:
tags:
- "metadata"
- summary: "Save metadata list"
+ summary: "Save metadata"
description: ""
- operationId: "saveMetadataList"
+ operationId: "saveMetadata"
produces:
- "application/json"
parameters:
@@ -1004,9 +1008,7 @@ paths:
name: "body"
required: false
schema:
- type: "array"
- items:
- $ref: "#/definitions/LogsearchMetaData"
+ $ref: "#/definitions/LogsearchMetaData"
responses:
200:
description: "successful operation"
@@ -1017,17 +1019,15 @@ paths:
delete:
tags:
- "metadata"
- summary: "Delete metadata list"
+ summary: "Delete metadata"
description: ""
- operationId: "deleteMetadataList"
+ operationId: "deleteMetadata"
parameters:
- in: "body"
name: "body"
required: false
schema:
- type: "array"
- items:
- $ref: "#/definitions/LogsearchMetaData"
+ $ref: "#/definitions/LogsearchMetaData"
responses:
default:
description: "successful operation"
@@ -3157,13 +3157,13 @@ paths:
type: "string"
security:
- basicAuth: []
- /shipper/input/{clusterName}/services/{serviceName}:
+ /shipper/input/{clusterName}/services:
get:
tags:
- "shipper"
- summary: "Get shipper config"
+ summary: "Get service names"
description: ""
- operationId: "getShipperConfig"
+ operationId: "getServices"
produces:
- "application/json"
parameters:
@@ -3171,50 +3171,42 @@ paths:
in: "path"
required: true
type: "string"
- - name: "serviceName"
- in: "path"
- required: true
- type: "string"
responses:
200:
description: "successful operation"
schema:
- $ref: "#/definitions/LSServerInputConfig"
+ type: "array"
+ items:
+ type: "string"
security:
- basicAuth: []
- post:
+ /shipper/filters/{clusterName}/level:
+ get:
tags:
- "shipper"
- summary: "Set shipper config"
+ summary: "Get log level filter"
description: ""
- operationId: "createShipperConfig"
+ operationId: "getLogLevelFilters"
produces:
- "application/json"
parameters:
- - in: "body"
- name: "body"
- required: false
- schema:
- $ref: "#/definitions/LSServerInputConfig"
- name: "clusterName"
in: "path"
required: true
type: "string"
- - name: "serviceName"
- in: "path"
- required: true
- type: "string"
responses:
- default:
+ 200:
description: "successful operation"
+ schema:
+ $ref: "#/definitions/LSServerLogLevelFilterMap"
security:
- basicAuth: []
put:
tags:
- "shipper"
- summary: "Set shipper config"
+ summary: "Update log level filter"
description: ""
- operationId: "setShipperConfig"
+ operationId: "setLogLevelFilter"
produces:
- "application/json"
parameters:
@@ -3222,27 +3214,23 @@ paths:
name: "body"
required: false
schema:
- $ref: "#/definitions/LSServerInputConfig"
+ $ref: "#/definitions/LSServerLogLevelFilterMap"
- name: "clusterName"
in: "path"
required: true
type: "string"
- - name: "serviceName"
- in: "path"
- required: true
- type: "string"
responses:
default:
description: "successful operation"
security:
- basicAuth: []
- /shipper/input/{clusterName}/services:
+ /shipper/input/{clusterName}/services/{serviceName}:
get:
tags:
- "shipper"
- summary: "Get service names"
+ summary: "Get shipper config"
description: ""
- operationId: "getServices"
+ operationId: "getShipperConfig"
produces:
- "application/json"
parameters:
@@ -3250,42 +3238,50 @@ paths:
in: "path"
required: true
type: "string"
+ - name: "serviceName"
+ in: "path"
+ required: true
+ type: "string"
responses:
200:
description: "successful operation"
schema:
- type: "array"
- items:
- type: "string"
+ $ref: "#/definitions/LSServerInputConfig"
security:
- basicAuth: []
- /shipper/filters/{clusterName}/level:
- get:
+ post:
tags:
- "shipper"
- summary: "Get log level filter"
+ summary: "Set shipper config"
description: ""
- operationId: "getLogLevelFilters"
+ operationId: "createShipperConfig"
produces:
- "application/json"
parameters:
+ - in: "body"
+ name: "body"
+ required: false
+ schema:
+ $ref: "#/definitions/LSServerInputConfig"
- name: "clusterName"
in: "path"
required: true
type: "string"
+ - name: "serviceName"
+ in: "path"
+ required: true
+ type: "string"
responses:
- 200:
+ default:
description: "successful operation"
- schema:
- $ref: "#/definitions/LSServerLogLevelFilterMap"
security:
- basicAuth: []
put:
tags:
- "shipper"
- summary: "Update log level filter"
+ summary: "Set shipper config"
description: ""
- operationId: "setLogLevelFilter"
+ operationId: "setShipperConfig"
produces:
- "application/json"
parameters:
@@ -3293,11 +3289,15 @@ paths:
name: "body"
required: false
schema:
- $ref: "#/definitions/LSServerLogLevelFilterMap"
+ $ref: "#/definitions/LSServerInputConfig"
- name: "clusterName"
in: "path"
required: true
type: "string"
+ - name: "serviceName"
+ in: "path"
+ required: true
+ type: "string"
responses:
default:
description: "successful operation"
@@ -3415,66 +3415,6 @@ securityDefinitions:
basicAuth:
type: "basic"
definitions:
- BarGraphData:
- type: "object"
- properties:
- dataCount:
- type: "array"
- items:
- $ref: "#/definitions/NameValueData"
- name:
- type: "string"
- BarGraphDataListResponse:
- type: "object"
- properties:
- graphData:
- type: "array"
- items:
- $ref: "#/definitions/BarGraphData"
- NameValueData:
- type: "object"
- properties:
- name:
- type: "string"
- value:
- type: "string"
- AuditServiceLoadBodyRequest:
- type: "object"
- properties:
- startIndex:
- type: "string"
- page:
- type: "string"
- pageSize:
- type: "string"
- sortBy:
- type: "string"
- sortType:
- type: "string"
- start_time:
- type: "string"
- end_time:
- type: "string"
- clusters:
- type: "string"
- includeMessage:
- type: "string"
- excludeMessage:
- type: "string"
- mustBe:
- type: "string"
- mustNot:
- type: "string"
- includeQuery:
- type: "string"
- excludeQuery:
- type: "string"
- from:
- type: "string"
- to:
- type: "string"
- userList:
- type: "string"
AuditFieldMetadataResponse:
type: "object"
properties:
@@ -3509,27 +3449,23 @@ definitions:
format: "int32"
policy:
type: "string"
- reqContext:
- type: "string"
- action:
- type: "string"
reason:
type: "string"
text:
type: "string"
+ sess:
+ type: "string"
access:
type: "string"
- proxyUsers:
- type: "array"
- items:
- type: "string"
- logType:
+ reqContext:
type: "string"
tags:
type: "array"
items:
type: "string"
- sess:
+ logType:
+ type: "string"
+ action:
type: "string"
agent:
type: "string"
@@ -3557,6 +3493,10 @@ definitions:
type: "string"
tags_str:
type: "string"
+ proxyUsers:
+ type: "array"
+ items:
+ type: "string"
id:
type: "string"
type:
@@ -3566,12 +3506,12 @@ definitions:
_version_:
type: "integer"
format: "int64"
- log_message:
- type: "string"
bundle_id:
type: "string"
case_id:
type: "string"
+ log_message:
+ type: "string"
logfile_line_number:
type: "integer"
format: "int32"
@@ -3682,6 +3622,29 @@ definitions:
properties:
clusters:
type: "string"
+ BarGraphData:
+ type: "object"
+ properties:
+ dataCount:
+ type: "array"
+ items:
+ $ref: "#/definitions/NameValueData"
+ name:
+ type: "string"
+ BarGraphDataListResponse:
+ type: "object"
+ properties:
+ graphData:
+ type: "array"
+ items:
+ $ref: "#/definitions/BarGraphData"
+ NameValueData:
+ type: "object"
+ properties:
+ name:
+ type: "string"
+ value:
+ type: "string"
AuditBarGraphBodyRequest:
type: "object"
properties:
@@ -3801,6 +3764,43 @@ definitions:
type: "string"
format:
type: "string"
+ AuditServiceLoadBodyRequest:
+ type: "object"
+ properties:
+ startIndex:
+ type: "string"
+ page:
+ type: "string"
+ pageSize:
+ type: "string"
+ sortBy:
+ type: "string"
+ sortType:
+ type: "string"
+ start_time:
+ type: "string"
+ end_time:
+ type: "string"
+ clusters:
+ type: "string"
+ includeMessage:
+ type: "string"
+ excludeMessage:
+ type: "string"
+ mustBe:
+ type: "string"
+ mustNot:
+ type: "string"
+ includeQuery:
+ type: "string"
+ excludeQuery:
+ type: "string"
+ from:
+ type: "string"
+ to:
+ type: "string"
+ userList:
+ type: "string"
LogsearchMetaData:
type: "object"
properties:
@@ -3854,11 +3854,11 @@ definitions:
type: "string"
group:
type: "string"
+ ip:
+ type: "string"
logtime:
type: "string"
format: "date-time"
- ip:
- type: "string"
id:
type: "string"
file:
@@ -3866,12 +3866,12 @@ definitions:
_version_:
type: "integer"
format: "int64"
- log_message:
- type: "string"
bundle_id:
type: "string"
case_id:
type: "string"
+ log_message:
+ type: "string"
logfile_line_number:
type: "integer"
format: "int32"
@@ -4684,6 +4684,39 @@ definitions:
type: "string"
clusters:
type: "string"
+ LSServerLogLevelFilter:
+ type: "object"
+ required:
+ - "defaultLevels"
+ - "hosts"
+ - "label"
+ properties:
+ label:
+ type: "string"
+ hosts:
+ type: "array"
+ items:
+ type: "string"
+ defaultLevels:
+ type: "array"
+ items:
+ type: "string"
+ overrideLevels:
+ type: "array"
+ items:
+ type: "string"
+ expiryTime:
+ type: "string"
+ format: "date-time"
+ LSServerLogLevelFilterMap:
+ type: "object"
+ required:
+ - "filter"
+ properties:
+ filter:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/LSServerLogLevelFilter"
LSServerConditions:
type: "object"
required:
@@ -4795,45 +4828,12 @@ definitions:
$ref: "#/definitions/LSServerFilter"
LSServerPostMapValuesList:
type: "object"
- LSServerLogLevelFilter:
- type: "object"
- required:
- - "defaultLevels"
- - "hosts"
- - "label"
- properties:
- label:
- type: "string"
- hosts:
- type: "array"
- items:
- type: "string"
- defaultLevels:
- type: "array"
- items:
- type: "string"
- overrideLevels:
- type: "array"
- items:
- type: "string"
- expiryTime:
- type: "string"
- format: "date-time"
- LSServerLogLevelFilterMap:
- type: "object"
- required:
- - "filter"
- properties:
- filter:
- type: "object"
- additionalProperties:
- $ref: "#/definitions/LSServerLogLevelFilter"
SolrCollectionState:
type: "object"
properties:
- znodeReady:
- type: "boolean"
solrCollectionReady:
type: "boolean"
+ znodeReady:
+ type: "boolean"
configurationUploaded:
type: "boolean"
diff --git a/docs/logfeeder_properties.md b/docs/logfeeder_properties.md
index 8eec4cd..9f8f9eb 100644
--- a/docs/logfeeder_properties.md
+++ b/docs/logfeeder_properties.md
@@ -36,7 +36,7 @@ limitations under the License.
|`logfeeder.cloud.rollover.threshold.size`|Rollover cloud log files after the log file size reach this limit|80|<ul><li>`1024`</li></ul>|
|`logfeeder.cloud.rollover.threshold.size.unit`|Rollover cloud log file size unit (e.g: KB, MB etc.)|MB|<ul><li>`KB`</li></ul>|
|`logfeeder.cloud.rollover.use.gzip`|Use GZip on archived logs.|true|<ul><li>`false`</li></ul>|
-|`logfeeder.cloud.storage.base.path`|Base path prefix for storing logs (cloud storage / hdfs)|/apps/logsearch|<ul><li>`/user/logsearch/mypath`</li></ul>|
+|`logfeeder.cloud.storage.base.path`|Base path prefix for storing logs (cloud storage / hdfs), could be an absolute path or URI. (if URI used, that will override the default.FS with HDFS client)|/apps/logsearch|<ul><li>`/user/logsearch/mypath`</li><li>`s3a:///user/logsearch`</li></ul>|
|`logfeeder.cloud.storage.bucket`|Amazon S3 bucket.|logfeeder|<ul><li>`logs`</li></ul>|
|`logfeeder.cloud.storage.bucket.bootstrap`|Create bucket on startup.|true|<ul><li>`false`</li></ul>|
|`logfeeder.cloud.storage.custom.fs`|If it is not empty, override fs.defaultFS for HDFS client. Can be useful to write data to a different bucket (from other services) if the bucket address is read from core-site.xml|`EMPTY`|<ul><li>`s3a://anotherbucket`</li></ul>|