This is an automated email from the ASF dual-hosted git repository.

pkarwasz pushed a commit to branch feature/remove-log4j-kubernetes-2.x
in repository https://gitbox.apache.org/repos/asf/logging-log4j2.git

commit 12672eaa7219da33df544cd5e57f7f417082e2be
Author: Piotr P. Karwasz <[email protected]>
AuthorDate: Mon Mar 25 13:17:07 2024 +0100

    Remove `log4j-kubernetes` in version 2.x
    
    Due to the differences between the lifecycles of Log4j Core and the
    Kubernetes Client, we remove `log4j-kubernetes` from the 2.x release and
    redirect users to Fabric8's own `kubernetes-log4j` artifact, introduced in
    fabric8io/kubernetes-client#5718.
    
    The `log4j-kubernetes` lookup depends on:
    
    * the very stable `StrLookup` interface from Log4j Core,
    * the evolving set of Kubernetes metadata provided by Kubernetes Client.
    
    Therefore, it makes more sense to distribute the lookup together with the
    Kubernetes Client.
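    
    For users who relied on the `${k8s:...}` lookup, the migration amounts to
    swapping the dependency. A minimal sketch of the replacement Maven
    dependency follows; the `io.fabric8` coordinates are an assumption based on
    the linked pull request, so verify the artifact and version against the
    Kubernetes Client release notes:
    
        <dependency>
          <groupId>io.fabric8</groupId>
          <artifactId>kubernetes-log4j</artifactId>
          <!-- Assumption: use the Kubernetes Client release that ships the lookup -->
          <version>${kubernetes-client.version}</version>
        </dependency>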
---
 log4j-kubernetes/pom.xml                           |  73 --
 .../logging/log4j/kubernetes/ContainerUtil.java    |  92 --
 .../log4j/kubernetes/KubernetesClientBuilder.java  |  74 --
 .../kubernetes/KubernetesClientProperties.java     | 193 -----
 .../logging/log4j/kubernetes/KubernetesLookup.java | 297 -------
 .../logging/log4j/kubernetes/package-info.java     |  24 -
 .../log4j/kubernetes/KubernetesLookupTest.java     |  98 ---
 .../src/test/resources/clusterPod.json             | 177 ----
 log4j-kubernetes/src/test/resources/localPod.json  | 141 ---
 log4j-parent/pom.xml                               |   9 -
 pom.xml                                            |   7 -
 src/site/asciidoc/log4j-kubernetes.adoc            | 202 -----
 src/site/asciidoc/manual/cloud.adoc                | 952 ++++++++++-----------
 src/site/asciidoc/manual/lookups.adoc              |  64 +-
 14 files changed, 443 insertions(+), 1960 deletions(-)

diff --git a/log4j-kubernetes/pom.xml b/log4j-kubernetes/pom.xml
deleted file mode 100644
index 359f9ef483..0000000000
--- a/log4j-kubernetes/pom.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one or more
-  ~ contributor license agreements.  See the NOTICE file distributed with
-  ~ this work for additional information regarding copyright ownership.
-  ~ The ASF licenses this file to you under the Apache License, Version 2.0
-  ~ (the "License"); you may not use this file except in compliance with
-  ~ the License.  You may obtain a copy of the License at
-  ~
-  ~      http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.apache.logging.log4j</groupId>
-    <artifactId>log4j</artifactId>
-    <version>${revision}</version>
-    <relativePath>../log4j-parent</relativePath>
-  </parent>
-
-  <artifactId>log4j-kubernetes</artifactId>
-
-  <name>Apache Log4j Kubernetes Library</name>
-  <description>Apache Log4j Kubernetes Support</description>
-
-  <properties>
-
-    <!--
-      ~ OSGi and JPMS options
-      -->
-    <bnd-extra-module-options>
-      <!-- Filebased module names: MUST be static -->
-      kubernetes.client;substitute="kubernetes-client";transitive=false;static=true,
-      kubernetes.model.core;substitute="kubernetes-model-core";transitive=false;static=true
-    </bnd-extra-module-options>
-    <Fragment-Host>org.apache.logging.log4j.core</Fragment-Host>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-core</artifactId>
-    </dependency>
-    <!-- Kubernetes Client -->
-    <dependency>
-      <groupId>io.fabric8</groupId>
-      <artifactId>kubernetes-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.junit.jupiter</groupId>
-      <artifactId>junit-jupiter-engine</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.junit.vintage</groupId>
-      <artifactId>junit-vintage-engine</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-</project>
diff --git a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/ContainerUtil.java b/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/ContainerUtil.java
deleted file mode 100644
index 814b2c11cf..0000000000
--- a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/ContainerUtil.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.logging.log4j.kubernetes;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Objects;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.status.StatusLogger;
-
-/**
- * Locate the current docker container.
- */
-public class ContainerUtil {
-    private static final Logger LOGGER = StatusLogger.getLogger();
-    private static final int MAXLENGTH = 65;
-
-    /**
-     * Returns the container id when running in a Docker container.
-     *
-     * This inspects /proc/self/cgroup looking for a Kubernetes Control Group. Once it finds one it attempts
-     * to isolate just the docker container id. There doesn't appear to be a standard way to do this, but
-     * it seems to be the only way to determine what the current container is in a multi-container pod. It would have
-     * been much nicer if Kubernetes would just put the container id in a standard environment variable.
-     *
-     * @see <a href="http://stackoverflow.com/a/25729598/12916">Stackoverflow</a> for a discussion on retrieving the containerId.
-     * @see <a href="https://github.com/jenkinsci/docker-workflow-plugin/blob/master/src/main/java/org/jenkinsci/plugins/docker/workflow/client/ControlGroup.java">ControlGroup</a>
-     * for the original version of this. Not much is actually left but it provided good inspiration.
-     * @return The container id.
-     */
-    public static String getContainerId() {
-        try {
-            final File file = new File("/proc/self/cgroup");
-            if (file.exists()) {
-                final Path path = file.toPath();
-                final String id = Files.lines(path)
-                        .map(ContainerUtil::getContainerId)
-                        .filter(Objects::nonNull)
-                        .findFirst()
-                        .orElse(null);
-                LOGGER.debug("Found container id {}", id);
-                return id;
-            }
-            LOGGER.warn("Unable to access container information");
-        } catch (IOException ioe) {
-            LOGGER.warn("Error obtaining container id: {}", ioe.getMessage());
-        }
-        return null;
-    }
-
-    private static String getContainerId(String line) {
-        // Every control group in Kubernetes will use
-        if (line.contains("/kubepods")) {
-            // Strip off everything up to the last slash.
-            int i = line.lastIndexOf('/');
-            if (i < 0) {
-                return null;
-            }
-            // If the remainder has a period then take everything up to it.
-            line = line.substring(i + 1);
-            i = line.lastIndexOf('.');
-            if (i > 0) {
-                line = line.substring(0, i);
-            }
-            // Everything ending with a '/' has already been stripped but the remainder might start with "docker-"
-            if (line.contains("docker-")) {
-                // 8:cpuset:/kubepods.slice/kubepods-pod9c26dfb6_b9c9_11e7_bfb9_02c6c1fc4861.slice/docker-3dd988081e7149463c043b5d9c57d7309e079c5e9290f91feba1cc45a04d6a5b.scope
-                i = line.lastIndexOf("docker-");
-                line = line.substring(i + 7);
-            }
-            return line.length() <= MAXLENGTH ? line : line.substring(0, MAXLENGTH);
-        }
-
-        return null;
-    }
-}
diff --git a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesClientBuilder.java b/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesClientBuilder.java
deleted file mode 100644
index 3847c74a6b..0000000000
--- a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesClientBuilder.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.logging.log4j.kubernetes;
-
-import io.fabric8.kubernetes.client.Config;
-import io.fabric8.kubernetes.client.ConfigBuilder;
-import io.fabric8.kubernetes.client.DefaultKubernetesClient;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.status.StatusLogger;
-
-/**
- * Builds a Kubernetes Client.
- */
-public class KubernetesClientBuilder {
-
-    private static final Logger LOGGER = StatusLogger.getLogger();
-
-    public KubernetesClient createClient() {
-        final Config config = kubernetesClientConfig();
-        return config != null ? new DefaultKubernetesClient(config) : null;
-    }
-
-    private Config kubernetesClientConfig() {
-        Config base = null;
-        try {
-            base = Config.autoConfigure(null);
-        } catch (Exception ex) {
-            if (ex instanceof NullPointerException) {
-                return null;
-            }
-        }
-        final KubernetesClientProperties props = new 
KubernetesClientProperties(base);
-        final Config properties = new ConfigBuilder(base)
-                .withApiVersion(props.getApiVersion())
-                .withCaCertData(props.getCaCertData())
-                .withCaCertFile(props.getCaCertFile())
-                .withClientCertData(props.getClientCertData())
-                .withClientCertFile(props.getClientCertFile())
-                .withClientKeyAlgo(props.getClientKeyAlgo())
-                .withClientKeyData(props.getClientKeyData())
-                .withClientKeyFile(props.getClientKeyFile())
-                .withClientKeyPassphrase(props.getClientKeyPassphrase())
-                .withConnectionTimeout(props.getConnectionTimeout())
-                .withHttpProxy(props.getHttpProxy())
-                .withHttpsProxy(props.getHttpsProxy())
-                .withMasterUrl(props.getMasterUrl())
-                .withNamespace(props.getNamespace())
-                .withNoProxy(props.getNoProxy())
-                .withPassword(props.getPassword())
-                .withProxyPassword(props.getProxyPassword())
-                .withProxyUsername(props.getProxyUsername())
-                .withRequestTimeout(props.getRequestTimeout())
-                .withRollingTimeout(props.getRollingTimeout())
-                .withTrustCerts(props.isTrustCerts())
-                .withUsername(props.getUsername())
-                .build();
-        return properties;
-    }
-}
diff --git a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesClientProperties.java b/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesClientProperties.java
deleted file mode 100644
index 0e3bac3811..0000000000
--- a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesClientProperties.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.logging.log4j.kubernetes;
-
-import io.fabric8.kubernetes.client.Config;
-import java.time.Duration;
-import org.apache.logging.log4j.util.PropertiesUtil;
-
-/**
- * Obtains properties used to configure the Kubernetes client.
- */
-public class KubernetesClientProperties {
-
-    private static final String[] PREFIXES = {"log4j2.kubernetes.client.", 
"spring.cloud.kubernetes.client."};
-    private static final String API_VERSION = "apiVersion";
-    private static final String CA_CERT_FILE = "caCertFile";
-    private static final String CA_CERT_DATA = "caCertData";
-    private static final String CLIENT_CERT_FILE = "clientCertFile";
-    private static final String CLIENT_CERT_DATA = "clientCertData";
-    private static final String CLIENT_KEY_FILE = "clientKeyFile";
-    private static final String CLIENT_KEY_DATA = "clientKeyData";
-    private static final String CLIENT_KEY_DATA_TYPO = "cientKeyData";
-    private static final String CLIENT_KEY_ALGO = "clientKeyAlgo";
-    private static final String CLIENT_KEY_PASSPHRASE = "clientKeyPassphrase";
-    private static final String CONNECTION_TIMEOUT = "connectionTimeout";
-    private static final String HTTP_PROXY = "httpProxy";
-    private static final String HTTPS_PROXY = "httpsProxy";
-    private static final String LOGGING_INTERVAL = "loggingInterval";
-    private static final String MASTER_URL = "masterUrl";
-    private static final String NAMESPACE = "namespace";
-    private static final String NO_PROXY = "noProxy";
-    private static final String PASSWORD = "password";
-    private static final String PROXY_USERNAME = "proxyUsername";
-    private static final String PROXY_PASSWORD = "proxyPassword";
-    private static final String REQUEST_TIMEOUT = "requestTimeout";
-    private static final String ROLLING_TIMEOUT = "rollingTimeout";
-    private static final String TRUST_CERTS = "trustCerts";
-    private static final String USERNAME = "username";
-    private static final String WATCH_RECONNECT_INTERVAL = 
"watchReconnectInterval";
-    private static final String WATCH_RECONNECT_LIMIT = "watchReconnectLimit";
-
-    private final PropertiesUtil props = PropertiesUtil.getProperties();
-    private final Config base;
-
-    public KubernetesClientProperties(final Config base) {
-        this.base = base;
-    }
-
-    public String getApiVersion() {
-        return props.getStringProperty(PREFIXES, API_VERSION, 
base::getApiVersion);
-    }
-
-    public String getCaCertFile() {
-        return props.getStringProperty(PREFIXES, CA_CERT_FILE, 
base::getCaCertFile);
-    }
-
-    public String getCaCertData() {
-        return props.getStringProperty(PREFIXES, CA_CERT_DATA, 
base::getCaCertData);
-    }
-
-    public String getClientCertFile() {
-        return props.getStringProperty(PREFIXES, CLIENT_CERT_FILE, 
base::getClientCertFile);
-    }
-
-    public String getClientCertData() {
-        return props.getStringProperty(PREFIXES, CLIENT_CERT_DATA, 
base::getClientCertData);
-    }
-
-    public String getClientKeyFile() {
-        return props.getStringProperty(PREFIXES, CLIENT_KEY_FILE, 
base::getClientKeyFile);
-    }
-
-    public String getClientKeyData() {
-        return props.getStringProperty(
-                PREFIXES,
-                CLIENT_KEY_DATA,
-                () -> props.getStringProperty(PREFIXES, CLIENT_KEY_DATA_TYPO, 
base::getClientKeyData));
-    }
-
-    public String getClientKeyAlgo() {
-        return props.getStringProperty(PREFIXES, CLIENT_KEY_ALGO, 
base::getClientKeyAlgo);
-    }
-
-    public String getClientKeyPassphrase() {
-        return props.getStringProperty(PREFIXES, CLIENT_KEY_PASSPHRASE, 
base::getClientKeyPassphrase);
-    }
-
-    public int getConnectionTimeout() {
-        final Duration timeout = props.getDurationProperty(PREFIXES, 
CONNECTION_TIMEOUT, null);
-        if (timeout != null) {
-            return (int) timeout.toMillis();
-        }
-        return base.getConnectionTimeout();
-    }
-
-    public String getHttpProxy() {
-        return props.getStringProperty(PREFIXES, HTTP_PROXY, 
base::getHttpProxy);
-    }
-
-    public String getHttpsProxy() {
-        return props.getStringProperty(PREFIXES, HTTPS_PROXY, 
base::getHttpsProxy);
-    }
-
-    public int getLoggingInterval() {
-        final Duration interval = props.getDurationProperty(PREFIXES, 
LOGGING_INTERVAL, null);
-        if (interval != null) {
-            return (int) interval.toMillis();
-        }
-        return base.getLoggingInterval();
-    }
-
-    public String getMasterUrl() {
-        return props.getStringProperty(PREFIXES, MASTER_URL, 
base::getMasterUrl);
-    }
-
-    public String getNamespace() {
-        return props.getStringProperty(PREFIXES, NAMESPACE, 
base::getNamespace);
-    }
-
-    public String[] getNoProxy() {
-        final String result = props.getStringProperty(PREFIXES, NO_PROXY, 
null);
-        if (result != null) {
-            return result.replace("\\s", "").split(",");
-        }
-        return base.getNoProxy();
-    }
-
-    public String getPassword() {
-        return props.getStringProperty(PREFIXES, PASSWORD, base::getPassword);
-    }
-
-    public String getProxyUsername() {
-        return props.getStringProperty(PREFIXES, PROXY_USERNAME, 
base::getProxyUsername);
-    }
-
-    public String getProxyPassword() {
-        return props.getStringProperty(PREFIXES, PROXY_PASSWORD, 
base::getProxyPassword);
-    }
-
-    public int getRequestTimeout() {
-        final Duration interval = props.getDurationProperty(PREFIXES, 
REQUEST_TIMEOUT, null);
-        if (interval != null) {
-            return (int) interval.toMillis();
-        }
-        return base.getRequestTimeout();
-    }
-
-    public long getRollingTimeout() {
-        final Duration interval = props.getDurationProperty(PREFIXES, 
ROLLING_TIMEOUT, null);
-        if (interval != null) {
-            return interval.toMillis();
-        }
-        return base.getRollingTimeout();
-    }
-
-    public Boolean isTrustCerts() {
-        return props.getBooleanProperty(PREFIXES, TRUST_CERTS, 
base::isTrustCerts);
-    }
-
-    public String getUsername() {
-        return props.getStringProperty(PREFIXES, USERNAME, base::getUsername);
-    }
-
-    public int getWatchReconnectInterval() {
-        final Duration interval = props.getDurationProperty(PREFIXES, 
WATCH_RECONNECT_INTERVAL, null);
-        if (interval != null) {
-            return (int) interval.toMillis();
-        }
-        return base.getWatchReconnectInterval();
-    }
-
-    public int getWatchReconnectLimit() {
-        final Duration interval = props.getDurationProperty(PREFIXES, 
WATCH_RECONNECT_LIMIT, null);
-        if (interval != null) {
-            return (int) interval.toMillis();
-        }
-        return base.getWatchReconnectLimit();
-    }
-}
diff --git a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesLookup.java b/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesLookup.java
deleted file mode 100644
index 703e4e6339..0000000000
--- a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/KubernetesLookup.java
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.logging.log4j.kubernetes;
-
-import io.fabric8.kubernetes.api.model.Container;
-import io.fabric8.kubernetes.api.model.ContainerStatus;
-import io.fabric8.kubernetes.api.model.Namespace;
-import io.fabric8.kubernetes.api.model.Pod;
-import io.fabric8.kubernetes.client.Config;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import java.net.URL;
-import java.nio.file.Paths;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.core.LogEvent;
-import org.apache.logging.log4j.core.config.plugins.Plugin;
-import org.apache.logging.log4j.core.lookup.AbstractLookup;
-import org.apache.logging.log4j.core.lookup.StrLookup;
-import org.apache.logging.log4j.status.StatusLogger;
-import org.apache.logging.log4j.util.LoaderUtil;
-import org.apache.logging.log4j.util.Strings;
-
-/**
- * Retrieve various Kubernetes attributes. Supported keys are:
- *  accountName, containerId, containerName, clusterName, host, hostIp, labels, labels.app,
- *  labels.podTemplateHash, masterUrl, namespaceId, namespaceName, podId, podIp, podName,
- *  imageId, imageName.
- */
-@Plugin(name = "k8s", category = StrLookup.CATEGORY)
-public class KubernetesLookup extends AbstractLookup {
-
-    private static final Logger LOGGER = StatusLogger.getLogger();
-    private static final String HOSTNAME = "HOSTNAME";
-    private static final String SPRING_ENVIRONMENT_KEY = "SpringEnvironment";
-
-    private static volatile KubernetesInfo kubernetesInfo;
-    private static final Lock initLock = new ReentrantLock();
-    private static final boolean isSpringIncluded =
-            LoaderUtil.isClassAvailable("org.apache.logging.log4j.spring.cloud.config.client.SpringEnvironmentHolder")
-                    || LoaderUtil.isClassAvailable("org.apache.logging.log4j.spring.boot.SpringEnvironmentHolder");
-    private Pod pod;
-    private Namespace namespace;
-    private URL masterUrl;
-
-    public KubernetesLookup() {
-        this.pod = null;
-        this.namespace = null;
-        this.masterUrl = null;
-        initialize();
-    }
-
-    KubernetesLookup(Pod pod, Namespace namespace, URL masterUrl) {
-        this.pod = pod;
-        this.namespace = namespace;
-        this.masterUrl = masterUrl;
-        initialize();
-    }
-
-    private boolean initialize() {
-        if (kubernetesInfo == null || (isSpringIncluded && 
!kubernetesInfo.isSpringActive)) {
-            initLock.lock();
-            try {
-                final boolean isSpringActive = isSpringActive();
-                if (kubernetesInfo == null || (!kubernetesInfo.isSpringActive 
&& isSpringActive)) {
-                    final KubernetesInfo info = new KubernetesInfo();
-                    KubernetesClient client = null;
-                    info.isSpringActive = isSpringActive;
-                    if (pod == null) {
-                        client = new KubernetesClientBuilder().createClient();
-                        if (client != null) {
-                            pod = getCurrentPod(System.getenv(HOSTNAME), 
client);
-                            info.masterUrl = client.getMasterUrl();
-                            if (pod != null) {
-                                info.namespace = 
pod.getMetadata().getNamespace();
-                                namespace = client.namespaces()
-                                        .withName(info.namespace)
-                                        .get();
-                            }
-                        } else {
-                            LOGGER.warn("Kubernetes is not available for 
access");
-                        }
-                    } else {
-                        info.masterUrl = masterUrl;
-                    }
-                    if (pod != null) {
-                        if (namespace != null) {
-                            info.namespaceId = 
namespace.getMetadata().getUid();
-                            info.namespaceAnnotations = 
namespace.getMetadata().getAnnotations();
-                            info.namespaceLabels = 
namespace.getMetadata().getLabels();
-                        }
-                        info.app = pod.getMetadata().getLabels().get("app");
-                        info.hostName = pod.getSpec().getNodeName();
-                        info.annotations = pod.getMetadata().getAnnotations();
-                        final String app = info.app != null ? info.app : "";
-                        info.podTemplateHash = 
pod.getMetadata().getLabels().get("pod-template-hash");
-                        info.accountName = 
pod.getSpec().getServiceAccountName();
-                        info.clusterName = pod.getMetadata().getClusterName();
-                        info.hostIp = pod.getStatus().getHostIP();
-                        info.labels = pod.getMetadata().getLabels();
-                        info.podId = pod.getMetadata().getUid();
-                        info.podIp = pod.getStatus().getPodIP();
-                        info.podName = pod.getMetadata().getName();
-                        ContainerStatus containerStatus = null;
-                        final List<ContainerStatus> statuses = 
pod.getStatus().getContainerStatuses();
-                        if (statuses.size() == 1) {
-                            containerStatus = statuses.get(0);
-                        } else if (statuses.size() > 1) {
-                            final String containerId = 
ContainerUtil.getContainerId();
-                            if (containerId != null) {
-                                containerStatus = statuses.stream()
-                                        .filter(cs -> 
cs.getContainerID().contains(containerId))
-                                        .findFirst()
-                                        .orElse(null);
-                            }
-                        }
-                        final String containerName;
-                        if (containerStatus != null) {
-                            info.containerId = 
containerStatus.getContainerID();
-                            info.imageId = containerStatus.getImageID();
-                            containerName = containerStatus.getName();
-                        } else {
-                            containerName = null;
-                        }
-                        Container container = null;
-                        final List<Container> containers = 
pod.getSpec().getContainers();
-                        if (containers.size() == 1) {
-                            container = containers.get(0);
-                        } else if (containers.size() > 1 && containerName != 
null) {
-                            container = containers.stream()
-                                    .filter(c -> 
c.getName().equals(containerName))
-                                    .findFirst()
-                                    .orElse(null);
-                        }
-                        if (container != null) {
-                            info.containerName = container.getName();
-                            info.imageName = container.getImage();
-                        }
-
-                        kubernetesInfo = info;
-                    }
-                }
-            } finally {
-                initLock.unlock();
-            }
-        }
-        return kubernetesInfo != null;
-    }
-
-    @Override
-    public String lookup(final LogEvent event, final String key) {
-        if (kubernetesInfo == null) {
-            return null;
-        }
-        switch (key) {
-            case "accountName": {
-                return kubernetesInfo.accountName;
-            }
-            case "annotations": {
-                return kubernetesInfo.annotations.toString();
-            }
-            case "containerId": {
-                return kubernetesInfo.containerId;
-            }
-            case "containerName": {
-                return kubernetesInfo.containerName;
-            }
-            case "clusterName": {
-                return kubernetesInfo.clusterName;
-            }
-            case "host": {
-                return kubernetesInfo.hostName;
-            }
-            case "hostIp": {
-                return kubernetesInfo.hostIp;
-            }
-            case "labels": {
-                return kubernetesInfo.labels.toString();
-            }
-            case "labels.app": {
-                return kubernetesInfo.app;
-            }
-            case "labels.podTemplateHash": {
-                return kubernetesInfo.podTemplateHash;
-            }
-            case "masterUrl": {
-                return kubernetesInfo.masterUrl.toString();
-            }
-            case "namespaceAnnotations": {
-                return kubernetesInfo.namespaceAnnotations.toString();
-            }
-            case "namespaceId": {
-                return kubernetesInfo.namespaceId;
-            }
-            case "namespaceLabels": {
-                return kubernetesInfo.namespaceLabels.toString();
-            }
-            case "namespaceName": {
-                return kubernetesInfo.namespace;
-            }
-            case "podId": {
-                return kubernetesInfo.podId;
-            }
-            case "podIp": {
-                return kubernetesInfo.podIp;
-            }
-            case "podName": {
-                return kubernetesInfo.podName;
-            }
-            case "imageId": {
-                return kubernetesInfo.imageId;
-            }
-            case "imageName": {
-                return kubernetesInfo.imageName;
-            }
-            default:
-                return null;
-        }
-    }
-
-    /**
-     * For unit testing only.
-     */
-    void clearInfo() {
-        kubernetesInfo = null;
-    }
-
-    private String getHostname() {
-        return System.getenv(HOSTNAME);
-    }
-
-    private Pod getCurrentPod(final String hostName, final KubernetesClient 
kubernetesClient) {
-        try {
-            if (isServiceAccount() && Strings.isNotBlank(hostName)) {
-                return kubernetesClient.pods().withName(hostName).get();
-            }
-        } catch (Throwable t) {
-            LOGGER.debug("Unable to locate pod with name {}.", hostName);
-        }
-        return null;
-    }
-
-    private boolean isServiceAccount() {
-        return 
Paths.get(Config.KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH).toFile().exists()
-                && Paths.get(Config.KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH)
-                        .toFile()
-                        .exists();
-    }
-
-    private boolean isSpringActive() {
-        return isSpringIncluded
-                && LogManager.getFactory() != null
-                && 
LogManager.getFactory().hasContext(KubernetesLookup.class.getName(), null, 
false)
-                && 
LogManager.getContext(false).getObject(SPRING_ENVIRONMENT_KEY) != null;
-    }
-
-    private static class KubernetesInfo {
-        boolean isSpringActive;
-        String accountName;
-        Map<String, String> annotations;
-        String app;
-        String clusterName;
-        String containerId;
-        String containerName;
-        String hostName;
-        String hostIp;
-        String imageId;
-        String imageName;
-        Map<String, String> labels;
-        URL masterUrl;
-        String namespace;
-        Map<String, String> namespaceAnnotations;
-        String namespaceId;
-        Map<String, String> namespaceLabels;
-        String podId;
-        String podIp;
-        String podName;
-        String podTemplateHash;
-    }
-}
diff --git a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/package-info.java b/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/package-info.java
deleted file mode 100644
index 5eeaf75555..0000000000
--- a/log4j-kubernetes/src/main/java/org/apache/logging/log4j/kubernetes/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-@Export
-@Open("org.apache.logging.log4j.core")
-@Version("2.20.1")
-package org.apache.logging.log4j.kubernetes;
-
-import aQute.bnd.annotation.jpms.Open;
-import org.osgi.annotation.bundle.Export;
-import org.osgi.annotation.versioning.Version;
diff --git a/log4j-kubernetes/src/test/java/org/apache/logging/log4j/kubernetes/KubernetesLookupTest.java b/log4j-kubernetes/src/test/java/org/apache/logging/log4j/kubernetes/KubernetesLookupTest.java
deleted file mode 100644
index e01a1051d5..0000000000
--- a/log4j-kubernetes/src/test/java/org/apache/logging/log4j/kubernetes/KubernetesLookupTest.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.logging.log4j.kubernetes;
-
-import static org.junit.Assert.assertEquals;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import io.fabric8.kubernetes.api.model.Namespace;
-import io.fabric8.kubernetes.api.model.ObjectMeta;
-import io.fabric8.kubernetes.api.model.Pod;
-import java.io.File;
-import java.net.URL;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.UUID;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Validate the Kubernetes Lookup.
- */
-public class KubernetesLookupTest {
-
-    private static final String localJson = "target/test-classes/localPod.json";
-    private static final String clusterJson = "target/test-classes/clusterPod.json";
-    private static final ObjectMapper objectMapper = new ObjectMapper();
-    public static URL masterUrl;
-
-    @BeforeClass
-    public static void beforeClass() throws Exception {
-        masterUrl = new URL("http://localhost:443/");
-    }
-
-    @Test
-    public void testLocal() throws Exception {
-        final Pod pod = objectMapper.readValue(new File(localJson), Pod.class);
-        final Namespace namespace = createNamespace();
-        final KubernetesLookup lookup = new KubernetesLookup(pod, namespace, 
masterUrl);
-        try {
-            assertEquals("Incorrect container name", "sampleapp", 
lookup.lookup("containerName"));
-            assertEquals(
-                    "Incorrect container id",
-                    
"docker://818b0098946c67e6ac56cb7c0934b7c2a9f50feb7244b422b2a7f566f7e5d0df",
-                    lookup.lookup("containerId"));
-            assertEquals("Incorrect host name", "docker-desktop", 
lookup.lookup("host"));
-            assertEquals("Incorrect pod name", "sampleapp-584f99476d-mnrp4", 
lookup.lookup("podName"));
-        } finally {
-            lookup.clearInfo();
-        }
-    }
-
-    @Test
-    public void testCluster() throws Exception {
-        final Pod pod = objectMapper.readValue(new File(clusterJson), 
Pod.class);
-        final Namespace namespace = createNamespace();
-        final KubernetesLookup lookup = new KubernetesLookup(pod, namespace, 
masterUrl);
-        try {
-            assertEquals("Incorrect container name", "platform-forms-service", 
lookup.lookup("containerName"));
-            assertEquals(
-                    "Incorrect container id",
-                    
"docker://2b7c2a93dfb48334aa549e29fdd38039ddd256eec43ba64c145fa4b75a1542f0",
-                    lookup.lookup("containerId"));
-            assertEquals("Incorrect host name", "k8s-tmpcrm-worker-s03-04", 
lookup.lookup("host"));
-            assertEquals(
-                    "Incorrect pod name", 
"platform-forms-service-primary-5ddfc4f9b8-kfpzv", lookup.lookup("podName"));
-        } finally {
-            lookup.clearInfo();
-        }
-    }
-
-    private Namespace createNamespace() {
-        final Namespace namespace = new Namespace();
-        final ObjectMeta meta = new ObjectMeta();
-        final Map<String, String> annotations = new HashMap<>();
-        annotations.put("test", "name");
-        meta.setAnnotations(annotations);
-        final Map<String, String> labels = new HashMap<>();
-        labels.put("ns", "my-namespace");
-        meta.setLabels(labels);
-        meta.setUid(UUID.randomUUID().toString());
-        namespace.setMetadata(meta);
-        return namespace;
-    }
-}
diff --git a/log4j-kubernetes/src/test/resources/clusterPod.json b/log4j-kubernetes/src/test/resources/clusterPod.json
deleted file mode 100644
index 7bae9c35eb..0000000000
--- a/log4j-kubernetes/src/test/resources/clusterPod.json
+++ /dev/null
@@ -1,177 +0,0 @@
-{
-  "apiVersion": "v1",
-  "kind": "Pod",
-  "metadata": {
-    "annotations": {
-      "cni.projectcalico.org/podIP": "172.16.55.101/32",
-      "cni.projectcalico.org/podIPs": "172.16.55.101/32",
-      "flagger-id": "94d53b7b-cc06-41b3-bbac-a2d14a16d95d",
-      "prometheus.io/port": "9797",
-      "prometheus.io/scrape": "true"
-    },
-    "creationTimestamp": "2020-06-15T15:44:16Z",
-    "generateName": "platform-forms-service-primary-5ddfc4f9b8-",
-    "labels": {
-      "app": "platform-forms-service-primary",
-      "pod-template-hash": "5ddfc4f9b8"
-    },
-    "name": "platform-forms-service-primary-5ddfc4f9b8-kfpzv",
-    "namespace": "default",
-    "ownerReferences": [
-      {
-        "apiVersion": "apps/v1",
-        "kind": "ReplicaSet",
-        "blockOwnerDeletion": true,
-        "controller": true,
-        "name": "platform-forms-service-primary-5ddfc4f9b8",
-        "uid": "d2e89c56-7623-439e-a9ee-4a67e2f3a81a"
-      }],
-    "resourceVersion": "37382150",
-    "selfLink": 
"/api/v1/namespaces/default/pods/platform-forms-service-primary-5ddfc4f9b8-kfpzv",
-    "uid": "df8cbac1-129c-4cd3-b5bc-65d72d8ba5f0"
-  },
-  "spec": {
-    "containers": [
-      {
-        "env": [
-          {
-            "name": "APACHE_ENV",
-            "value": "tmpcrm"
-          },
-          {
-            "name": "SPRING_PROFILES_ACTIVE",
-            "value": "tmpcrm"
-          },
-          {
-            "name": "JAVA_OPTS",
-            "value": "-Dlogging.label=crm"
-          }],
-        "image": "docker.apache.xyz/platform-forms-service:0.15.0",
-        "imagePullPolicy": "Always",
-        "livenessProbe": {
-          "failureThreshold": 3,
-          "httpGet": {
-            "path": "/info",
-            "port": "http",
-            "scheme": "HTTP"
-          },
-          "periodSeconds": 10,
-          "successThreshold": 1,
-          "timeoutSeconds": 1
-        },
-        "name": "platform-forms-service",
-        "ports": [
-          {
-            "containerPort": 8080,
-            "name": "http",
-            "protocol": "TCP"
-          }],
-        "readinessProbe": {
-          "failureThreshold": 3,
-          "httpGet": {
-            "path": "/health",
-            "port": "http",
-            "scheme": "HTTP"
-          },
-          "periodSeconds": 10,
-          "successThreshold": 1,
-          "timeoutSeconds": 1
-        },
-        "resources": {
-        },
-        "securityContext": {
-        },
-        "terminationMessagePath": "/dev/termination-log",
-        "terminationMessagePolicy": "File",
-        "volumeMounts": [
-          {
-            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
-            "name": "default-token-2nqlw",
-            "readOnly": true
-          }]
-      }],
-    "dnsPolicy": "ClusterFirst",
-    "enableServiceLinks": true,
-    "nodeName": "k8s-tmpcrm-worker-s03-04",
-    "priority": 0,
-    "restartPolicy": "Always",
-    "schedulerName": "default-scheduler",
-    "securityContext": {
-    },
-    "serviceAccount": "default",
-    "serviceAccountName": "default",
-    "terminationGracePeriodSeconds": 30,
-    "tolerations": [
-      {
-        "effect": "NoExecute",
-        "key": "node.kubernetes.io/not-ready",
-        "operator": "Exists",
-        "tolerationSeconds": 300
-      },
-      {
-        "effect": "NoExecute",
-        "key": "node.kubernetes.io/unreachable",
-        "operator": "Exists",
-        "tolerationSeconds": 300
-      }],
-    "volumes": [
-      {
-        "name": "default-token-2nqlw",
-        "secret": {
-          "defaultMode": 420,
-          "secretName": "default-token-2nqlw"
-        }
-      }]
-  },
-  "status": {
-    "conditions": [
-      {
-        "lastTransitionTime": "2020-06-15T15:44:16Z",
-        "status": "True",
-        "type": "Initialized"
-      },
-      {
-        "lastTransitionTime": "2020-06-15T15:44:46Z",
-        "status": "True",
-        "type": "Ready"
-      },
-      {
-        "lastTransitionTime": "2020-06-15T15:44:46Z",
-        "status": "True",
-        "type": "ContainersReady"
-      },
-      {
-        "lastTransitionTime": "2020-06-15T15:44:16Z",
-        "status": "True",
-        "type": "PodScheduled"
-      }],
-    "containerStatuses": [
-      {
-        "containerID": 
"docker://2b7c2a93dfb48334aa549e29fdd38039ddd256eec43ba64c145fa4b75a1542f0",
-        "image": "docker.apache.xyz/platform-forms-service:0.15.0",
-        "imageID":
-        
"docker-pullable://docker.apache.xyz/platform-forms-service@sha256:45fd19ccd99e218a7685c4cee5bc5b16aeae1cdb8e8773f9c066d4cfb22ee195",
-        "lastState": {
-        },
-        "name": "platform-forms-service",
-        "ready": true,
-        "restartCount": 0,
-        "state": {
-          "running": {
-            "startedAt": "2020-06-15T15:44:21Z"
-          }
-        },
-        "started": true
-      }],
-    "hostIP": "10.103.220.170",
-    "phase": "Running",
-    "podIP": "172.16.55.101",
-    "qosClass": "BestEffort",
-    "startTime": "2020-06-15T15:44:16Z",
-    "podIPs": [
-      {
-        "ip": "172.16.55.101"
-      }]
-  }
-}
-
diff --git a/log4j-kubernetes/src/test/resources/localPod.json b/log4j-kubernetes/src/test/resources/localPod.json
deleted file mode 100644
index 3aeef46724..0000000000
--- a/log4j-kubernetes/src/test/resources/localPod.json
+++ /dev/null
@@ -1,141 +0,0 @@
-{
-  "apiVersion": "v1",
-  "kind": "Pod",
-  "metadata": {
-    "creationTimestamp": "2020-06-14T21:50:09Z",
-    "generateName": "sampleapp-584f99476d-",
-    "labels": {
-      "app": "sampleapp",
-      "pod-template-hash": "584f99476d"
-    },
-    "name": "sampleapp-584f99476d-mnrp4",
-    "namespace": "default",
-    "ownerReferences": [
-      {
-        "apiVersion": "apps/v1",
-        "kind": "ReplicaSet",
-        "blockOwnerDeletion": true,
-        "controller": true,
-        "name": "sampleapp-584f99476d",
-        "uid": "d68146d1-17c4-486e-aa8d-07d7d5d38b94"
-      }],
-    "resourceVersion": "1200430",
-    "selfLink": "/api/v1/namespaces/default/pods/sampleapp-584f99476d-mnrp4",
-    "uid": "9213879a-479c-42ce-856b-7e2666d21829"
-  },
-  "spec": {
-    "containers": [
-      {
-        "env": [
-          {
-            "name": "JAVA_OPTS",
-            "value": "-Delastic.search.host=host.docker.internal"
-          }],
-        "image": "localhost:5000/sampleapp:latest",
-        "imagePullPolicy": "Always",
-        "name": "sampleapp",
-        "ports": [
-          {
-            "containerPort": 8080,
-            "protocol": "TCP"
-          },
-          {
-            "containerPort": 5005,
-            "protocol": "TCP"
-          }],
-        "resources": {
-        },
-        "terminationMessagePath": "/dev/termination-log",
-        "terminationMessagePolicy": "File",
-        "volumeMounts": [
-          {
-            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
-            "name": "default-token-jzq7d",
-            "readOnly": true
-          }]
-      }],
-    "dnsPolicy": "ClusterFirst",
-    "nodeName": "docker-desktop",
-    "priority": 0,
-    "restartPolicy": "Always",
-    "schedulerName": "default-scheduler",
-    "securityContext": {
-    },
-    "serviceAccount": "default",
-    "serviceAccountName": "default",
-    "terminationGracePeriodSeconds": 30,
-    "tolerations": [
-      {
-        "effect": "NoExecute",
-        "key": "node.kubernetes.io/not-ready",
-        "operator": "Exists",
-        "tolerationSeconds": 300
-      },
-      {
-        "effect": "NoExecute",
-        "key": "node.kubernetes.io/unreachable",
-        "operator": "Exists",
-        "tolerationSeconds": 300
-      }],
-    "volumes": [
-      {
-        "name": "default-token-jzq7d",
-        "secret": {
-          "defaultMode": 420,
-          "secretName": "default-token-jzq7d"
-        }
-      }],
-    "enableServiceLinks": true
-  },
-  "status": {
-    "conditions": [
-      {
-        "lastTransitionTime": "2020-06-14T21:50:09Z",
-        "status": "True",
-        "type": "Initialized"
-      },
-      {
-        "lastTransitionTime": "2020-06-14T21:50:10Z",
-        "status": "True",
-        "type": "Ready"
-      },
-      {
-        "lastTransitionTime": "2020-06-14T21:50:10Z",
-        "status": "True",
-        "type": "ContainersReady"
-      },
-      {
-        "lastTransitionTime": "2020-06-14T21:50:09Z",
-        "status": "True",
-        "type": "PodScheduled"
-      }],
-    "containerStatuses": [
-      {
-        "containerID": 
"docker://818b0098946c67e6ac56cb7c0934b7c2a9f50feb7244b422b2a7f566f7e5d0df",
-        "image": "sampleapp:latest",
-        "imageID":
-        
"docker-pullable://localhost:5000/sampleapp@sha256:3cefb2db514db73c69854fee8abd072f27240519432d08aad177a57ee34b7d39",
-        "lastState": {
-        },
-        "name": "sampleapp",
-        "ready": true,
-        "restartCount": 0,
-        "state": {
-          "running": {
-            "startedAt": "2020-06-14T21:50:10Z"
-          }
-        },
-        "started": true
-      }],
-    "hostIP": "192.168.65.3",
-    "phase": "Running",
-    "podIP": "10.1.0.47",
-    "qosClass": "BestEffort",
-    "startTime": "2020-06-14T21:50:09Z",
-    "podIPs": [
-      {
-        "ip": "10.1.0.47"
-      }]
-  }
-}
-
diff --git a/log4j-parent/pom.xml b/log4j-parent/pom.xml
index 1255a8c74f..3266bec636 100644
--- a/log4j-parent/pom.xml
+++ b/log4j-parent/pom.xml
@@ -119,7 +119,6 @@
     <junit-jupiter.version>5.10.2</junit-jupiter.version>
     <junit-pioneer.version>1.9.1</junit-pioneer.version>
     <kafka.version>3.7.0</kafka.version>
-    <kubernetes-client.version>5.12.4</kubernetes-client.version>
     <lightcouch.version>0.2.0</lightcouch.version>
     <log4j.version>1.2.17</log4j.version>
     
<log4j2-cachefile-transformer.version>2.15.0</log4j2-cachefile-transformer.version>
@@ -223,14 +222,6 @@
         <scope>import</scope>
       </dependency>
 
-      <dependency>
-        <groupId>io.fabric8</groupId>
-        <artifactId>kubernetes-client-bom</artifactId>
-        <version>${kubernetes-client.version}</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-
       <dependency>
         <groupId>org.mockito</groupId>
         <artifactId>mockito-bom</artifactId>
diff --git a/pom.xml b/pom.xml
index c47ce79393..a555723467 100644
--- a/pom.xml
+++ b/pom.xml
@@ -255,7 +255,6 @@
     <module>log4j-jpl</module>
     <module>log4j-jdbc-dbcp2</module>
     <module>log4j-jul</module>
-    <module>log4j-kubernetes</module>
     <module>log4j-layout-template-json</module>
     <module>log4j-layout-template-json-test</module>
     <module>log4j-mongodb3</module>
@@ -454,12 +453,6 @@
         <version>${project.version}</version>
       </dependency>
 
-      <dependency>
-        <groupId>org.apache.logging.log4j</groupId>
-        <artifactId>log4j-kubernetes</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-
       <dependency>
         <groupId>org.apache.logging.log4j</groupId>
         <artifactId>log4j-layout-template-json</artifactId>
diff --git a/src/site/asciidoc/log4j-kubernetes.adoc b/src/site/asciidoc/log4j-kubernetes.adoc
deleted file mode 100644
index f3757ceefe..0000000000
--- a/src/site/asciidoc/log4j-kubernetes.adoc
+++ /dev/null
@@ -1,202 +0,0 @@
-// vim: set syn=markdown :
-
-////
-Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-////
-
-#set($dollar = '$') #set($h1='#') #set($h2='##')
-
-$h1 Log4j Kubernetes Support
-
-Log4j supports Kubernetes by providing a Lookup to retrieve container information.
-
-$h2 Accessing Kubernetes
-
-The Log4j Kubernetes support requires access to the Docker REST interface.
-In many cases the REST service can be accessed automatically.
-If needed, the Kubernetes client can be configured in any of the standard Log4j configuration locations or via the Spring Boot configuration.
-Note, however, that since Spring Boot causes logging to initialize 3 times, and since the Spring environment is only available during the last Log4j initialization, Spring properties are only available to Log4j in that last initialization.
-
-$h2 Lookup Attributes
-
-Log4j Kubernetes provides access to the following container attributes:
-
-* accountName - The service account name.
-* clusterName - The name of the cluster the application is running in.
-* containerId - The full id assigned to the container.
-* containerName - The name assigned to the container.
-* host - The name of the host.
-* hostIp - The host's ip address.
-* imageId - The id assigned to the image.
-* imageName - The name assigned to the image.
-* labels - All labels formatted in a list.
-* labels.app - The application name.
-* labels.podTemplateHash - The pod's template hash value.
-* masterUrl - The url needed to access the API server.
-* namespaceId - The id of the namespace the various kubernetes components are located within.
-* namespaceName - The namespace the various kubernetes components are located within.
-* podId - The pod's id number.
-* podIp - The pod's ip address.
-* podName - The name of the pod.
-
-#set( $D = '${' ) #set( $container = 'k8s:containerId}') Attributes may be accessed by adding
-
-----
-$D$container
-----
-
-to the configuration.
-Note that Kubernetes variables are only resolved once during logging initialization, so they shouldn't be referenced with more than one '$' character.
-
-$h2 Configuration
-
-Much of the configuration needed to access the Kubernetes API server is provided automatically by Kubernetes.
-However, it is not uncommon to need to provide the URL required to access the Kubernetes API server or the namespace the application is assigned to.
-The properties below may either be configured using the Log4j variable names and located by Log4j's normal property resolution mechanism, or Log4j will resolve the Spring properties when the application is running in Spring Boot and the Spring Environment has been created.
-Note that Spring Boot initializes logging 3 times and only the last will have a Spring Environment present.
-
-[cols=",>,>,>"]
-|===
-| Log4j Property Name | Spring Property Name | Default | Description
-
-| log4j2.kubernetes.client.apiVersion
-| spring.cloud.kubernetes.client.apiVersion
-| v1
-| Kubernetes API Version
-
-| log4j2.kubernetes.client.caCertData
-| spring.cloud.kubernetes.client.caCertData
-|
-| Kubernetes API CACertData
-
-| log4j2.kubernetes.client.caCertFile
-| spring.cloud.kubernetes.client.caCertFile
-|
-| Kubernetes API CACertFile
-
-| log4j2.kubernetes.client.clientCertData
-| spring.cloud.kubernetes.client.clientCertData
-|
-| Kubernetes API ClientCertData
-
-| log4j2.kubernetes.client.clientCertFile
-| spring.cloud.kubernetes.client.clientCertFile
-|
-| Kubernetes API ClientCertFile
-
-| log4j2.kubernetes.client.clientKeyAlgo
-| spring.cloud.kubernetes.client.clientKeyAlgo
-| RSA
-| Kubernetes API ClientKeyAlgo
-
-| log4j2.kubernetes.client.clientKeyData
-| spring.cloud.kubernetes.client.clientKeyData
-|
-| Kubernetes API ClientKeyData
-
-| log4j2.kubernetes.client.clientKeyFile
-| spring.cloud.kubernetes.client.clientKeyFile
-|
-| Kubernetes API ClientKeyFile
-
-| log4j2.kubernetes.client.clientKeyPassPhrase
-| spring.cloud.kubernetes.client.clientKeyPassphrase
-| changeit
-| Kubernetes API ClientKeyPassphrase
-
-| log4j2.kubernetes.client.connectionTimeout
-| spring.cloud.kubernetes.client.connectionTimeout
-| 10s
-| Connection timeout
-
-| log4j2.kubernetes.client.httpProxy
-| spring.cloud.kubernetes.client.http-proxy
-|
-|
-
-| log4j2.kubernetes.client.httpsProxy
-| spring.cloud.kubernetes.client.https-proxy
-|
-|
-
-| log4j2.kubernetes.client.loggingInterval
-| spring.cloud.kubernetes.client.loggingInterval
-| 20s
-| Logging interval
-
-| log4j2.kubernetes.client.masterUrl
-| spring.cloud.kubernetes.client.masterUrl
-| kubernetes.default.svc
-| Kubernetes API Master Node URL
-
-| log4j2.kubernetes.client.namespace
-| spring.cloud.kubernetes.client.namespace
-| default
-| Kubernetes Namespace
-
-| log4j2.kubernetes.client.noProxy
-| spring.cloud.kubernetes.client.noProxy
-|
-|
-
-| log4j2.kubernetes.client.password
-| spring.cloud.kubernetes.client.password
-|
-| Kubernetes API Password
-
-| log4j2.kubernetes.client.proxyPassword
-| spring.cloud.kubernetes.client.proxyPassword
-|
-|
-
-| log4j2.kubernetes.client.proxyUsername
-| spring.cloud.kubernetes.client.proxyUsername
-|
-|
-
-| log4j2.kubernetes.client.requestTimeout
-| spring.cloud.kubernetes.client.requestTimeout
-| 10s
-| Request timeout
-
-| log4j2.kubernetes.client.rollingTimeout
-| spring.cloud.kubernetes.client.rollingTimeout
-| 900s
-| Rolling timeout
-
-| log4j2.kubernetes.client.trustCerts
-| spring.cloud.kubernetes.client.trustCerts
-| false
-| Kubernetes API Trust Certificates
-
-| log4j2.kubernetes.client.username
-| spring.cloud.kubernetes.client.username
-|
-| Kubernetes API Username
-
-| log4j2.kubernetes.client.watchReconnectInterval
-| spring.cloud.kubernetes.client.watchReconnectInterval
-| 1s
-| Reconnect Interval
-
-| log4j2.kubernetes.client.watchReconnectLimit
-| spring.cloud.kubernetes.client.watchReconnectLimit
-| -1
-| Reconnect Interval limit retries
-|===
-
-$h2 Requirements
-
-Log4j Kubernetes requires Log4j Core, Log4j API and a minimum of Java 8.
-For more information, see link:runtime-dependencies.html[Runtime Dependencies].
diff --git a/src/site/asciidoc/manual/cloud.adoc 
b/src/site/asciidoc/manual/cloud.adoc
index 00142c4645..bce6e28e0b 100644
--- a/src/site/asciidoc/manual/cloud.adoc
+++ b/src/site/asciidoc/manual/cloud.adoc
@@ -1,7 +1,5 @@
-// vim: set syn=markdown :
-
 ////
-Licensed to the Apache Software Foundation (ASF) under one or more
+ Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements. See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
@@ -16,320 +14,364 @@ Licensed to the Apache Software Foundation (ASF) under 
one or more
  See the License for the specific language governing permissions and
  limitations under the License.
 ////
+
 = Using Log4j in Cloud Enabled Applications
 
 == The Twelve-Factor Application
 
-The Logging Guidelines for https://12factor.net/logs[The Twelve-Factor App] 
state that all logs should be routed  unbuffered to stdout.
-Since this is the least common denominator it is guaranteed to work for all 
applications.
-However, as with any set of general guidelines, choosing the least common 
denominator approach comes at a cost.
-Some of the costs in Java applications include:
-
-. Java stack traces are multi-line log messages.
-The standard docker log driver cannot handle these properly.
-See  https://github.com/moby/moby/issues/22920[Docker Issue #22920] which was 
closed with the message "Don't Care".
-Solutions for this are to:  a.
-Use a docker log driver that does support multi-line log message,  b.
-Use a logging format that does not produce multi-line messages,  c.
-Log from Log4j directly to a logging forwarder or aggregator and bypass the 
docker logging driver.
-. When logging to stdout in Docker, log events pass through Java's standard 
output handling which is then directed  to the operating system so that the 
output can be piped into a file.
-The overhead of all this is measurably slower than just writing directly to a 
file as can be seen in these benchmark results where logging  to stdout is 
16-20 times slower over repeated runs than logging directly to the file.
-The results below were obtained by  running the 
https://github.com/apache/logging-log4j2/blob/2.x/log4j-perf/src/main/java/org/apache/logging/log4j/perf/jmh/OutputBenchmark.java[Output
 Benchmark] on a 2018 MacBook Pro with a 2.9GHz Intel Core i9 processor and a 
1TB SSD.
-However, these results alone would not be  enough to argue against writing to 
the standard output stream as they only amount to about 14-25 microseconds  per 
logging call vs 1.5 microseconds when writing to the file.
+The Logging Guidelines for https://12factor.net/logs[The Twelve-Factor App] 
state that all logs should be routed
+unbuffered to stdout. Since this is the least common denominator it is 
guaranteed to work for all applications. However,
+as with any set of general guidelines, choosing the least common denominator 
approach comes at a cost. Some of the costs
+in Java applications include:
+
+. Java stack traces are multi-line log messages. The standard docker log 
driver cannot handle these properly. See
+https://github.com/moby/moby/issues/22920[Docker Issue #22920] which was 
closed with the message "Don't Care".
+Solutions for this are to:
+.. Use a docker log driver that does support multi-line log messages,
+.. Use a logging format that does not produce multi-line messages,
+.. Log from Log4j directly to a logging forwarder or aggregator and bypass the 
docker logging driver.
+. When logging to stdout in Docker, log events pass through Java's standard 
output handling which is then directed
+to the operating system so that the output can be piped into a file. The 
overhead of all this is measurably slower
+than just writing directly to a file as can be seen in these benchmark results 
where logging
+to stdout is 16-20 times slower over repeated runs than logging directly to 
the file. The results below were obtained by
+running the 
https://github.com/apache/logging-log4j2/blob/release-2.x/log4j-perf-test/src/main/java/org/apache/logging/log4j/perf/jmh/OutputBenchmark.java[Output
 Benchmark]
+on a 2018 MacBook Pro with a 2.9GHz Intel Core i9 processor and a 1TB SSD. 
However, these results alone would not be
+enough to argue against writing to the standard output stream as they only 
amount to about 14-25 microseconds
+per logging call vs 1.5 microseconds when writing to the file.
 +
+[source]
 ----
-     Benchmark                  Mode  Cnt       Score       Error  Units
-     OutputBenchmark.console   thrpt   20   39291.885 ±  3370.066  ops/s
-     OutputBenchmark.file      thrpt   20  654584.309 ± 59399.092  ops/s
-     OutputBenchmark.redirect  thrpt   20   70284.576 ±  7452.167  ops/s
+    Benchmark                  Mode  Cnt       Score       Error  Units
+    OutputBenchmark.console   thrpt   20   39291.885 ±  3370.066  ops/s
+    OutputBenchmark.file      thrpt   20  654584.309 ± 59399.092  ops/s
+    OutputBenchmark.redirect  thrpt   20   70284.576 ±  7452.167  ops/s
 ----
-
-. When performing audit logging using a framework such as log4j-audit 
guaranteed delivery of the audit events is required.
-Many of the options for writing the output, including writing to the standard 
output stream, do not guarantee delivery.
-In these cases the event must be delivered to a "forwarder" that acknowledges 
receipt only when it has placed the event in durable storage, such as what 
https://flume.apache.org/[Apache Flume]  or https://kafka.apache.org/[Apache 
Kafka] will do.
+. When performing audit logging using a framework such as log4j-audit, guaranteed delivery of the audit events
+is required. Many of the options for writing the output, including writing to the standard output stream, do
+not guarantee delivery. In these cases the event must be delivered to a "forwarder" that acknowledges receipt
+only when it has placed the event in durable storage, as https://flume.apache.org/[Apache Flume] does.
 
 == Logging Approaches
 
-All the solutions discussed on this page are predicated with the idea that log 
files cannot permanently reside on the file system and that all log events 
should be routed to one or more log analysis tools that will  be used for 
reporting and alerting.
-There are many ways to forward and collect events to be sent to the  log 
analysis tools.
+All the solutions discussed on this page are predicated on the idea that log files cannot permanently
+reside on the file system and that all log events should be routed to one or more log analysis tools that will
+be used for reporting and alerting. There are many ways to forward and collect events to be sent to the
+log analysis tools.
 
-Note that any approach that bypasses Docker's logging drivers requires Log4j's 
 link:lookups.html#DockerLookup[Docker Lookup] to allow Docker attributes to be 
injected into the log events.
+Note that any approach that bypasses Docker's logging drivers requires Log4j's
+https://logging.apache.org/log4j/2.x/manual/lookups.html#DockerLookup[Docker 
Lookup] to allow Docker attributes to be injected into the log events.
 
 === Logging to the Standard Output Stream
 
 As discussed above, this is the recommended 12-Factor approach for 
applications running in a docker container.
 The Log4j team does not recommend this approach for performance reasons.
 
-image::../images/DockerStdout.png[Stdout]
+image::../images/DockerStdout.png[Stdout, "Application Logging to the Standard 
Output Stream"]
 
 === Logging to the Standard Output Stream with the Docker Fluentd Logging 
Driver
 
-Docker provides alternate 
https://docs.docker.com/config/containers/logging/configure/[logging drivers],  
such as https://docs.docker.com/config/containers/logging/gelf/[gelf] or  
https://docs.docker.com/config/containers/logging/fluentd/[fluentd], that can 
be used to redirect the standard output stream to a log forwarder or log 
aggregator.
+Docker provides alternate 
https://docs.docker.com/config/containers/logging/configure/[logging drivers],
+such as https://docs.docker.com/config/containers/logging/fluentd/[fluentd], 
that
+can be used to redirect the standard output stream to a log forwarder or log 
aggregator.
 
-When routing to a log forwarder it is expected that the forwarder will have 
the same lifetime as the  application.
-If the forwarder should fail the management tools would be expected to also 
terminate  other containers dependent on the forwarder.
+When routing to a log forwarder it is expected that the forwarder will have 
the same lifetime as the
+application. If the forwarder should fail the management tools would be 
expected to also terminate
+other containers dependent on the forwarder.
 
-image::../images/DockerFluentd.png[Docker Fluentbit]
+image::../images/DockerFluentd.png[Docker Fluentbit, "Logging via StdOut using 
the Docker Fluentd Logging Driver to Fluent-bit"]
 
 As an alternative the logging drivers could be configured to route events 
directly to a logging aggregator.
 This is generally not a good idea as the logging drivers only allow a single 
host and port to be configured.
-The docker documentation isn't clear but infers that log events will be 
dropped when log events cannot be delivered so this method should not be used 
if a highly available solution is required.
+The docker documentation isn't clear but implies that log events will be dropped when log events cannot be
+delivered, so this method should not be used if a highly available solution is required.
 
-image::../images/DockerFluentdAggregator.png[Docker Fluentd]
+image::../images/DockerFluentdAggregator.png[Docker Fluentd, "Logging via 
StdOut using the Docker Fluentd Logging Driver to Fluentd"]
 
 === Logging to a File
 
-While this is not the recommended 12-Factor approach, it performs very well.
-However, it requires that the  application declares a volume where the log 
files will reside and then configures the log forwarder to tail  those files.
-Care must also be taken to automatically manage the disk space used for the 
logs, which Log4j  can perform via the "Delete" action on the 
link:appenders.html#RollingFileAppender[RollingFileAppender].
+While this is not the recommended 12-Factor approach, it performs very well. 
However, it requires that the
+application declares a volume where the log files will reside and then 
configures the log forwarder to tail
+those files. Care must also be taken to automatically manage the disk space 
used for the logs, which Log4j
+can perform via the "Delete" action on the 
https://logging.apache.org/log4j/2.x/manual/appenders.html#RollingFileAppender[RollingFileAppender].
 
-image::../images/DockerLogFile.png[File]
+image::../images/DockerLogFile.png[File, "Logging to a File"]
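+
+As a minimal sketch of the "Delete" action mentioned above (the file names, paths, and the seven-day retention
+are illustrative assumptions, not values from the original documentation), a RollingFileAppender can prune old
+archives at rollover time:
+
+[source,xml]
+----
+<RollingFile name="File" fileName="/var/log/apps/app.log"
+             filePattern="/var/log/apps/app-%d{yyyy-MM-dd}-%i.log.gz">
+  <JsonTemplateLayout eventTemplateUri="classpath:EcsLayout.json"/>
+  <Policies>
+    <TimeBasedTriggeringPolicy/>
+    <SizeBasedTriggeringPolicy size="50 MB"/>
+  </Policies>
+  <DefaultRolloverStrategy>
+    <!-- Delete archived logs older than 7 days so the log volume does not fill up -->
+    <Delete basePath="/var/log/apps" maxDepth="1">
+      <IfFileName glob="app-*.log.gz"/>
+      <IfLastModified age="7d"/>
+    </Delete>
+  </DefaultRolloverStrategy>
+</RollingFile>
+----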
 
 === Sending Directly to a Log Forwarder via TCP
 
-Sending logs directly to a Log Forwarder is simple as it generally just 
requires that the forwarder's host and port be configured on a SocketAppender 
with an appropriate layout.
+Sending logs directly to a Log Forwarder is simple as it generally just 
requires that the forwarder's
+host and port be configured on a SocketAppender with an appropriate layout.
 
-image::../images/DockerTCP.png[TCP]
+image::../images/DockerTCP.png[TCP, "Application Logging to a Forwarder via 
TCP"]
 
 === Sending Directly to a Log Aggregator via TCP
 
-Similar to sending logs to a forwarder, logs can also be sent to a cluster of 
aggregators.
-However, setting this up is not as simple since, to be highly available, a 
cluster of aggregators must be used.
-However, the SocketAppender currently can only be configured with a single 
host and port.
-To allow  for failover if the primary aggregator fails the SocketAppender must 
be enclosed in a  link:appenders.html#FailoverAppender[FailoverAppender], which 
would also have the secondary aggregator configured.
-Another option is to have the SocketAppender  point to a highly available 
proxy that can forward to the Log Aggregator.
+Similar to sending logs to a forwarder, logs can also be sent to a cluster of aggregators. However,
+setting this up is not as simple since, to be highly available, a cluster of aggregators must be used,
+while the SocketAppender currently can only be configured with a single host and port. To allow
+for failover if the primary aggregator fails, the SocketAppender must be enclosed in a
+https://logging.apache.org/log4j/2.x/manual/appenders.html#FailoverAppender[FailoverAppender],
+which would also have the secondary aggregator configured, as shown in the sketch below. Another option is
+to have the SocketAppender point to a highly available proxy that can forward to the Log Aggregator.
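+
+A minimal sketch of that arrangement (appender names and aggregator addresses are illustrative assumptions):
+
+[source,xml]
+----
+<Appenders>
+  <Socket name="PrimaryAggregator" host="aggregator-1.example.com" port="12345" protocol="tcp">
+    <JsonTemplateLayout eventTemplateUri="classpath:EcsLayout.json"/>
+  </Socket>
+  <Socket name="SecondaryAggregator" host="aggregator-2.example.com" port="12345" protocol="tcp">
+    <JsonTemplateLayout eventTemplateUri="classpath:EcsLayout.json"/>
+  </Socket>
+  <!-- Events go to the primary aggregator; the secondary is only used when the primary fails -->
+  <Failover name="Aggregators" primary="PrimaryAggregator">
+    <Failovers>
+      <AppenderRef ref="SecondaryAggregator"/>
+    </Failovers>
+  </Failover>
+</Appenders>
+----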
 
-If the log aggregator used is Apache Flume or Apache Kafka (or similar) the 
Appenders for these support  being configured with a list of hosts and ports so 
high availability is not an issue.
+If the log aggregator used is Apache Flume (or similar), its Appender supports
+being configured with a list of hosts and ports, so high availability is not an issue.
 
-image::../images/LoggerAggregator.png[Aggregator]
+image::../images/LoggerAggregator.png[Aggregator, "Application Logging to an 
Aggregator via TCP"]
 
-== +++<a name="ELK">++++++</a>+++Logging using Elasticsearch, Logstash, and 
Kibana
+[#ELK]
+== Logging using Elasticsearch, Logstash, and Kibana
 
-There are various approaches with different trade-offs for ingesting logs into 
an ELK stack.
-Here we will briefly cover how one can forward Log4j generated events first to 
Logstash and then to Elasticsearch.
+There are various approaches with different trade-offs for ingesting logs into
+an ELK stack. Here we will briefly cover how one can forward Log4j generated
+events first to Logstash and then to Elasticsearch.
 
 === Log4j Configuration
 
-=== JsonTemplateLayout
-
-Log4j provides a multitude of JSON generating layouts.
-In particular, link:layouts.html#JSONTemplateLayout[JSON Template Layout] 
allows full schema customization and bundles ELK-specific layouts by default, 
which makes it a great fit for the bill.
-Using the EcsLayout template as shown below will generate data in Kibana where 
the message displayed exactly matches the message passed to Log4j and most of 
the event attributes, including any exceptions, are present as individual 
attributes that can be displayed.
-Note, however that stack traces  will be formatted without newlines.
-
- <Socket name="Logstash"
-         host="${sys:logstash.host}"
-         port="12345"
-         protocol="tcp"
-         bufferedIo="true">
-   <JsonTemplateLayout eventTemplateUri="classpath:EcsLayout.json">
-     <EventTemplateAdditionalField key="containerId" 
value="${docker:containerId:-}"/>
-     <EventTemplateAdditionalField key="application" 
value="${lower:${spring:spring.application.name:-spring}}"/>
-     <EventTemplateAdditionalField key="kubernetes.serviceAccountName" 
value="${k8s:accountName:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.containerId" 
value="${k8s:containerId:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.containerName" 
value="${k8s:containerName:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.host" 
value="${k8s:host:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.labels.app" 
value="${k8s:labels.app:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.labels.pod-template-hash" 
value="${k8s:labels.podTemplateHash:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.master_url" 
value="${k8s:masterUrl:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.namespaceId" 
value="${k8s:namespaceId:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.namespaceName" 
value="${k8s:namespaceName:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.podID" 
value="${k8s:podId:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.podIP" 
value="${k8s:podIp:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.podName" 
value="${k8s:podName:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.imageId" 
value="${k8s:imageId:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.imageName" 
value="${k8s:imageName:-}"/>
-   </JsonTemplateLayout>
- </Socket>
-
-==== Gelft Template
-
-The JsonTemplateLayout can also be used to generate JSON that matches the GELF 
specification which can format  the message attribute using a pattern in 
accordance with the PatternLayout.
-For example, the following template, named EnhancedGelf.json, can be used to 
generate GELF-compliant data that can be passed to Logstash.
-With this template the message attribute will include the thread id, level, 
specific ThreadContext attributes,  the class name, method name, and line 
number as well as the message.
-If an exception is included it will also  be included with newlines.
-This format follows very closely what you would see in a typical log file on 
disk  using the PatternLayout but has the additional advantage of including the 
attributes as separate fields that  can be queried.
-
- {
-     "version": "1.1",
-     "host": "${hostName}",
-     "short_message": {
-         "$resolver": "message",
-         "stringified": true
-     },
-     "full_message": {
-         "$resolver": "message",
-         "pattern": "[%t] %-5p %X{requestId, sessionId, loginId, userId, 
ipAddress, corpAcctNumber} %C{1.}.%M:%L - %m",
-         "stringified": true
-     },
-     "timestamp": {
-         "$resolver": "timestamp",
-         "epoch": {
-             "unit": "secs"
-         }
-     },
-     "level": {
-         "$resolver": "level",
-         "field": "severity",
-         "severity": {
-             "field": "code"
-         }
-     },
-     "_logger": {
-         "$resolver": "logger",
-         "field": "name"
-     },
-     "_thread": {
-         "$resolver": "thread",
-         "field": "name"
-     },
-     "_mdc": {
-         "$resolver": "mdc",
-         "flatten": {
-             "prefix": "_"
-         },
-         "stringified": true
-     }
- }
+==== JsonTemplateLayout
+
+Log4j provides a multitude of JSON-generating layouts. In particular, JSON Template Layout allows full schema
+customization and bundles ELK-specific layouts by default, which makes it a great fit.
+Using the EcsLayout template as shown below will generate data in Kibana where
+the message displayed exactly matches the message passed to Log4j and most of the event attributes, including
+any exceptions, are present as individual attributes that can be displayed. Note, however, that stack traces
+will be formatted without newlines.
+
+[source,xml]
+----
+<Socket name="Logstash"
+        host="${sys:logstash.host}"
+        port="12345"
+        protocol="tcp"
+        bufferedIo="true">
+  <JsonTemplateLayout eventTemplateUri="classpath:EcsLayout.json">
+    <EventTemplateAdditionalField key="containerId" 
value="${docker:containerId:-}"/>
+    <EventTemplateAdditionalField key="application" 
value="${lower:${spring:spring.application.name:-spring}}"/>
+    <EventTemplateAdditionalField key="kubernetes.serviceAccountName" 
value="${k8s:accountName:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.containerId" 
value="${k8s:containerId:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.containerName" 
value="${k8s:containerName:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.host" value="${k8s:host:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.labels.app" 
value="${k8s:labels.app:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.labels.pod-template-hash" 
value="${k8s:labels.podTemplateHash:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.master_url" 
value="${k8s:masterUrl:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.namespaceId" 
value="${k8s:namespaceId:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.namespaceName" 
value="${k8s:namespaceName:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.podID" 
value="${k8s:podId:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.podIP" 
value="${k8s:podIp:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.podName" 
value="${k8s:podName:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.imageId" 
value="${k8s:imageId:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.imageName" 
value="${k8s:imageName:-}"/>
+  </JsonTemplateLayout>
+</Socket>
+----
+
+==== Gelf Template
+
+The JsonTemplateLayout can also be used to generate JSON that matches the GELF 
specification which can format
+the message attribute using a pattern in accordance with the PatternLayout. 
For example, the following
+template, named EnhancedGelf.json, can be used to generate GELF-compliant data 
that can be passed to Logstash.
+With this template the message attribute will include the thread id, level, 
specific ThreadContext attributes,
+the class name, method name, and line number as well as the message. If an 
exception is included it will also
+be included with newlines. This format follows very closely what you would see 
in a typical log file on disk
+using the PatternLayout but has the additional advantage of including the 
attributes as separate fields that
+can be queried.
+
+[source,json]
+----
+{
+    "version": "1.1",
+    "host": "${hostName}",
+    "short_message": {
+        "$resolver": "message",
+        "stringified": true
+    },
+    "full_message": {
+        "$resolver": "message",
+        "pattern": "[%t] %-5p %X{requestId, sessionId, loginId, userId, 
ipAddress, corpAcctNumber} %C{1.}.%M:%L - %m",
+        "stringified": true
+    },
+    "timestamp": {
+        "$resolver": "timestamp",
+        "epoch": {
+            "unit": "secs"
+        }
+    },
+    "level": {
+        "$resolver": "level",
+        "field": "severity",
+        "severity": {
+            "field": "code"
+        }
+    },
+    "_logger": {
+        "$resolver": "logger",
+        "field": "name"
+    },
+    "_thread": {
+        "$resolver": "thread",
+        "field": "name"
+    },
+    "_mdc": {
+        "$resolver": "mdc",
+        "flatten": {
+            "prefix": "_"
+        },
+        "stringified": true
+    }
+}
+----
 
 The logging configuration to use this template would be
 
- <Socket name="Elastic"
-         host="\${sys:logstash.search.host}"
-         port="12222"
-         protocol="tcp"
-         bufferedIo="true">
-   <JsonTemplateLayout eventTemplateUri="classpath:EnhancedGelf.json" 
nullEventDelimiterEnabled="true">
-     <EventTemplateAdditionalField key="containerId" 
value="${docker:containerId:-}"/>
-     <EventTemplateAdditionalField key="application" 
value="${lower:${spring:spring.application.name:-spring}}"/>
-     <EventTemplateAdditionalField key="kubernetes.serviceAccountName" 
value="${k8s:accountName:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.containerId" 
value="${k8s:containerId:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.containerName" 
value="${k8s:containerName:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.host" 
value="${k8s:host:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.labels.app" 
value="${k8s:labels.app:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.labels.pod-template-hash" 
value="${k8s:labels.podTemplateHash:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.master_url" 
value="${k8s:masterUrl:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.namespaceId" 
value="${k8s:namespaceId:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.namespaceName" 
value="${k8s:namespaceName:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.podID" 
value="${k8s:podId:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.podIP" 
value="${k8s:podIp:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.podName" 
value="${k8s:podName:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.imageId" 
value="${k8s:imageId:-}"/>
-     <EventTemplateAdditionalField key="kubernetes.imageName" 
value="${k8s:imageName:-}"/>
-   </JsonTemplateLayout>
- </Socket>
-
-The significant difference with this configuration from the first example is 
that it references the  custom template and it specifies an event delimiter of 
a null character ('\0');
-
-NOTE: The level being passed with the above template does not strictly conform 
to the GELF spec as the Level being passed is the Log4j Level NOT the Level 
defined in the GELF spec.
-However, testing has shown  that Logstash, Elk, and Kibana are pretty tolerant 
of whatever data is passed to it.
+[source,xml]
+----
+<Socket name="Elastic"
+        host="\${sys:logstash.search.host}"
+        port="12222"
+        protocol="tcp"
+        bufferedIo="true">
+  <JsonTemplateLayout eventTemplateUri="classpath:EnhancedGelf.json" 
nullEventDelimiterEnabled="true">
+    <EventTemplateAdditionalField key="containerId" 
value="${docker:containerId:-}"/>
+    <EventTemplateAdditionalField key="application" 
value="${lower:${spring:spring.application.name:-spring}}"/>
+    <EventTemplateAdditionalField key="kubernetes.serviceAccountName" 
value="${k8s:accountName:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.containerId" 
value="${k8s:containerId:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.containerName" 
value="${k8s:containerName:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.host" value="${k8s:host:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.labels.app" 
value="${k8s:labels.app:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.labels.pod-template-hash" 
value="${k8s:labels.podTemplateHash:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.master_url" 
value="${k8s:masterUrl:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.namespaceId" 
value="${k8s:namespaceId:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.namespaceName" 
value="${k8s:namespaceName:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.podID" 
value="${k8s:podId:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.podIP" 
value="${k8s:podIp:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.podName" 
value="${k8s:podName:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.imageId" 
value="${k8s:imageId:-}"/>
+    <EventTemplateAdditionalField key="kubernetes.imageName" 
value="${k8s:imageName:-}"/>
+  </JsonTemplateLayout>
+</Socket>
+----
+
+The significant difference between this configuration and the first example is that it references the
+custom template and specifies an event delimiter of a null character ('\0').
+
+NOTE: The level being passed with the above template does not strictly conform to the GELF spec, as the
+Level being passed is the Log4j Level, NOT the Level defined in the GELF spec. However, testing has shown
+that Logstash, Elasticsearch, and Kibana are pretty tolerant of whatever data is passed to them.
 
 ==== Custom Template
 
-Another option is to use a custom template, possibly based on one of the 
standard templates.
-The template  below is loosely based on ECS but a) adds the spring boot 
application name, b) formats the message using PatternLayout, formats Map 
Messages as event.data attributes while setting the event action based on any 
Marker included in the event, includes all the ThreadContext attributes.
-
-NOTE: The Json Template Layout escapes control sequences so messages that 
contain '\n' will have those  control sequences copied as "\n" into the text 
rather than converted to a newline character.
-This bypasses  many problems that occur with Log Forwarders such as Filebeat 
and FluentBit/Fluentd.
-Kibana will correctly interpret these squences as newlines and display them 
correctly.
-Also note that the message pattern does not contain a timestamp.
-Kibana will display the timestamp field in its own column so placing it in the 
 message would be redundant.
-
- {
-   "@timestamp": {
-     "$resolver": "timestamp",
-     "pattern": {
-       "format": "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'",
-       "timeZone": "UTC"
-     }
-   },
-   "ecs.version": "1.11.0",
-   "log.level": {
-     "$resolver": "level",
-     "field": "name"
-   },
-   "application": "\${lower:\${spring:spring.application.name}}",
-   "short_message": {
-     "$resolver": "message",
-     "stringified": true
-   },
-   "message": {
-     "$resolver": "pattern",
-     "pattern": "[%t] %X{requestId, sessionId, loginId, userId, ipAddress, 
accountNumber} %C{1.}.%M:%L - %m%n"
-   },
-   "process.thread.name": {
-     "$resolver": "thread",
-     "field": "name"
-   },
-   "log.logger": {
-     "$resolver": "logger",
-     "field": "name"
-   },
-   "event.action": {
-     "$resolver": "marker",
-     "field": "name"
-   },
-   "event.data": {
-     "$resolver": "map",
-     "stringified": true
-   },
-   "labels": {
-     "$resolver": "mdc",
-     "flatten": true,
-     "stringified": true
-   },
-   "tags": {
-     "$resolver": "ndc"
-   },
-   "error.type": {
-     "$resolver": "exception",
-     "field": "className"
-   },
-   "error.message": {
-     "$resolver": "exception",
-     "field": "message"
-   },
-   "error.stack_trace": {
-     "$resolver": "exception",
-     "field": "stackTrace",
-     "stackTrace": {
-       "stringified": true
-     }
-   }
- }
-
-Finally, the GelfLayout can be used to generate GELF compliant output.
-Unlike the JsonTemplateLayout it  adheres closely to the GELF spec.
-
- <Socket name="Elastic" host="${sys:elastic.search.host}" port="12222" 
protocol="tcp" bufferedIo="true">
-   <GelfLayout includeStackTrace="true" host="${hostName}" 
includeThreadContext="true" includeNullDelimiter="true"
-               compressionType="OFF">
-     
<ThreadContextIncludes>requestId,sessionId,loginId,userId,ipAddress,callingHost</ThreadContextIncludes>
-     <MessagePattern>%d [%t] %-5p %X{requestId, sessionId, loginId, userId, 
ipAddress} %C{1.}.%M:%L - %m%n</MessagePattern>
-     <KeyValuePair key="containerId" value="${docker:containerId:-}"/>
-     <KeyValuePair key="application" 
value="${lower:${spring:spring.application.name:-spring}}"/>
-     <KeyValuePair key="kubernetes.serviceAccountName" 
value="${k8s:accountName:-}"/>
-     <KeyValuePair key="kubernetes.containerId" value="${k8s:containerId:-}"/>
-     <KeyValuePair key="kubernetes.containerName" 
value="${k8s:containerName:-}"/>
-     <KeyValuePair key="kubernetes.host" value="${k8s:host:-}"/>
-     <KeyValuePair key="kubernetes.labels.app" value="${k8s:labels.app:-}"/>
-     <KeyValuePair key="kubernetes.labels.pod-template-hash" 
value="${k8s:labels.podTemplateHash:-}"/>
-     <KeyValuePair key="kubernetes.master_url" value="${k8s:masterUrl:-}"/>
-     <KeyValuePair key="kubernetes.namespaceId" value="${k8s:namespaceId:-}"/>
-     <KeyValuePair key="kubernetes.namespaceName" 
value="${k8s:namespaceName:-}"/>
-     <KeyValuePair key="kubernetes.podID" value="${k8s:podId:-}"/>
-     <KeyValuePair key="kubernetes.podIP" value="${k8s:podIp:-}"/>
-     <KeyValuePair key="kubernetes.podName" value="${k8s:podName:-}"/>
-     <KeyValuePair key="kubernetes.imageId" value="${k8s:imageId:-}"/>
-     <KeyValuePair key="kubernetes.imageName" value="${k8s:imageName:-}"/>
-   </GelfLayout>
- </Socket>
+Another option is to use a custom template, possibly based on one of the standard templates. The template
+below is loosely based on ECS but a) adds the Spring Boot application name, b) formats the message
+using PatternLayout, c) formats Map Messages as event.data attributes while setting the event action based on
+any Marker included in the event, and d) includes all the ThreadContext attributes.
+
+NOTE: The JSON Template Layout escapes control sequences, so messages that contain '\n' will have those
+control sequences copied as "\n" into the text rather than converted to a newline character. This bypasses
+many problems that occur with Log Forwarders such as Filebeat and FluentBit/Fluentd. Kibana will
+interpret these sequences as newlines and display them correctly. Also note that the message pattern does
+not contain a timestamp. Kibana will display the timestamp field in its own column, so placing it in the
+message would be redundant.
+
+[source,json]
+----
+{
+  "@timestamp": {
+    "$resolver": "timestamp",
+    "pattern": {
+      "format": "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'",
+      "timeZone": "UTC"
+    }
+  },
+  "ecs.version": "1.11.0",
+  "log.level": {
+    "$resolver": "level",
+    "field": "name"
+  },
+  "application": "\\${lower:\\${spring:spring.application.name}}",
+  "short_message": {
+    "$resolver": "message",
+    "stringified": true
+  },
+  "message": {
+    "$resolver": "pattern",
+    "pattern": "[%t] %X{requestId, sessionId, loginId, userId, ipAddress, 
accountNumber} %C{1.}.%M:%L - %m%n"
+  },
+  "process.thread.name": {
+    "$resolver": "thread",
+    "field": "name"
+  },
+  "log.logger": {
+    "$resolver": "logger",
+    "field": "name"
+  },
+  "event.action": {
+    "$resolver": "marker",
+    "field": "name"
+  },
+  "event.data": {
+    "$resolver": "map",
+    "stringified": true
+  },
+  "labels": {
+    "$resolver": "mdc",
+    "flatten": true,
+    "stringified": true
+  },
+  "tags": {
+    "$resolver": "ndc"
+  },
+  "error.type": {
+    "$resolver": "exception",
+    "field": "className"
+  },
+  "error.message": {
+    "$resolver": "exception",
+    "field": "message"
+  },
+  "error.stack_trace": {
+    "$resolver": "exception",
+    "field": "stackTrace",
+    "stackTrace": {
+      "stringified": true
+    }
+  }
+}
+----
+
+Finally, the GelfLayout can be used to generate GELF-compliant output. Unlike the JsonTemplateLayout, it adheres closely to the GELF spec.
+
+[source,xml]
+----
+<Socket name="Elastic" host="${sys:elastic.search.host}" port="12222" 
protocol="tcp" bufferedIo="true">
+    <GelfLayout includeStackTrace="true" host="${hostName}" 
includeThreadContext="true" includeNullDelimiter="true"
+    compressionType="OFF">
+        
<ThreadContextIncludes>requestId,sessionId,loginId,userId,ipAddress,callingHost</ThreadContextIncludes>
+        <MessagePattern>%d [%t] %-5p %X{requestId, sessionId, loginId, userId, 
ipAddress} %C{1.}.%M:%L - %m%n</MessagePattern>
+        <KeyValuePair key="containerId" value="${docker:containerId:-}"/>
+        <KeyValuePair key="application" 
value="${lower:${spring:spring.application.name:-spring}}"/>
+        <KeyValuePair key="kubernetes.serviceAccountName" 
value="${k8s:accountName:-}"/>
+        <KeyValuePair key="kubernetes.containerId" 
value="${k8s:containerId:-}"/>
+        <KeyValuePair key="kubernetes.containerName" 
value="${k8s:containerName:-}"/>
+        <KeyValuePair key="kubernetes.host" value="${k8s:host:-}"/>
+        <KeyValuePair key="kubernetes.labels.app" value="${k8s:labels.app:-}"/>
+        <KeyValuePair key="kubernetes.labels.pod-template-hash" 
value="${k8s:labels.podTemplateHash:-}"/>
+        <KeyValuePair key="kubernetes.master_url" value="${k8s:masterUrl:-}"/>
+        <KeyValuePair key="kubernetes.namespaceId" 
value="${k8s:namespaceId:-}"/>
+        <KeyValuePair key="kubernetes.namespaceName" 
value="${k8s:namespaceName:-}"/>
+        <KeyValuePair key="kubernetes.podID" value="${k8s:podId:-}"/>
+        <KeyValuePair key="kubernetes.podIP" value="${k8s:podIp:-}"/>
+        <KeyValuePair key="kubernetes.podName" value="${k8s:podName:-}"/>
+        <KeyValuePair key="kubernetes.imageId" value="${k8s:imageId:-}"/>
+        <KeyValuePair key="kubernetes.imageName" value="${k8s:imageName:-}"/>
+    </GelfLayout>
+</Socket>
+----
 
 ==== Logstash Configuration with Gelf
 
 We will configure Logstash to listen on TCP port 12345 for payloads of type JSON and then forward these to the console and/or an Elasticsearch server.
 
-....
+[source]
+----
 input {
   tcp {
     port => 12345
@@ -338,301 +380,175 @@ input {
 }
 
 output {
-
-  # (Un)comment for debugging purposes.
+  # Uncomment for debugging purposes.
   # stdout { codec => rubydebug }
 
-  # Modify the hosts value to reflect where elasticsearch is installed.
+  # Modify the hosts value to reflect where Elasticsearch is installed.
   elasticsearch {
     hosts => ["http://localhost:9200/"]
     index => "app-%{application}-%{+YYYY.MM.dd}"
   }
-
 }
-....
+----
 
 ==== Logstash Configuration with JsonTemplateLayout
 
-When one of the GELF compliant formats is used Logstash should be configured as
-
-gelf {            host \=> "localhost"            use_tcp \=> true            
use_udp \=> false            port \=> 12222            type \=> "gelf"          
}        }
-
-    filter {
-      # These are GELF/Syslog logging levels as defined in RFC 3164. Map the 
integer level to its human readable format.
-      translate {
-        field => "[level]"
-        destination => "[levelName]"
-        dictionary => {
-          "0" => "EMERG"
-          "1" => "ALERT"
-          "2" => "CRITICAL"
-          "3" => "ERROR"
-          "4" => "WARN"
-          "5" => "NOTICE"
-          "6" => "INFO"
-          "7" => "DEBUG"
-        }
-      }
-    }
+When using one of the GELF compliant formats, Logstash should be configured as:
+
+[source]
+----
+input {
+  gelf {
+    host => "localhost"
+    use_tcp => true
+    use_udp => false
+    port => 12222
+    type => "gelf"
+  }
+}
+
+filter {
+  # These are GELF/Syslog logging levels as defined in RFC 3164.
+  # Map the integer level to its human readable format.
+  translate {
+    field => "[level]"
+    destination => "[levelName]"
+    dictionary => {
+      "0" => "EMERG"
+      "1" => "ALERT"
+      "2" => "CRITICAL"
+      "3" => "ERROR"
+      "4" => "WARN"
+      "5" => "NOTICE"
+      "6" => "INFO"
+      "7" => "DEBUG"
+    }
+  }
+}
+
+output {
+  # Modify the hosts value to reflect where Elasticsearch is installed.
+  elasticsearch {
+    hosts => ["http://localhost:9200/"]
+    index => "app-%{application}-%{+YYYY.MM.dd}"
+  }
+}
+----
 
-    output {
-      # (Un)comment for debugging purposes
-      # stdout { codec => rubydebug }
-      # Modify the hosts value to reflect where elasticsearch is installed.
-      elasticsearch {
-        hosts => ["http://localhost:9200/";]
-        index => "app-%{application}-%{+YYYY.MM.dd}"
-      }
-    } #### Filebeat configuration with JsonTemplateLayout
-
-When using a JsonTemplateLayout that complies with ECS (or is similar to the 
custom template previously shown) the configuration of filebeat is 
straightforward.
-
- filebeat.inputs:
- - type: log
-   enabled: true
-   json.keys_under_root: true
-   paths:
-     - /var/log/apps/*.log
+==== Filebeat Configuration with JsonTemplateLayout
+
+When using a JsonTemplateLayout that complies with ECS, Filebeat configuration 
is straightforward.
+
+[source,yaml]
+----
+filebeat.inputs:
+- type: log
+  enabled: true
+  json.keys_under_root: true
+  paths:
+    - /var/log/apps/*.log
+----
 
 === Kibana
 
-Using the EnhancedGelf template, the GelfLayout or the custom template the 
above configurations the message  field will contain a fully formatted log 
event just as it would  appear in a file Appender.
-The ThreadContext  attributes, custome fields, thread name, etc.
-will all be available as attributes on each log event that can  be used for 
filtering.
-The result will resemble image:../images/kibana.png[]
+Using the EnhancedGelf template or the custom template, the above 
configurations will allow the message
+field to contain a fully formatted log event. The ThreadContext attributes, 
custom fields, thread name, etc., will all be available as attributes on each 
log event that can be used for filtering. The result will resemble:
+
+image::../images/kibana.png[Kibana, "Kibana Display"]
 
 == Managing Logging Configuration
 
-Spring Boot provides another least common denominator approach to logging 
configuration.
-It will let you set the  log level for various Loggers within an application 
which can be dynamically updated via REST endpoints provided  by Spring.
-While this works in a lot of cases it does not support any of the more 
advanced filtering features of  Log4j.
-For example, since it cannot add or modify any Filters other than the log 
level of a logger, changes cannot be made to allow  all log events for a 
specific user or customer to temporarily be logged  (see 
link:filters.html#DynamicThresholdFilter[DynamicThresholdFilter] or  
link:filters.html#ThreadContextMapFilter[ThreadContextMapFilter]) or any other 
kinds of changes to filters.
-Also, in a microservices, clustered environment it is quite likely that these 
changes will need to be propagated to multiple servers at the same time.
-Trying to achieve this via REST calls could be difficult.
+Spring Boot provides another least common denominator approach to logging 
configuration. It will let you set the log level for various Loggers within an 
application which can be dynamically updated via REST endpoints provided by 
Spring. While this works in a lot of cases it does not support any of the more 
advanced filtering features of Log4j. For example, since it cannot add or 
modify any Filters other than the log level of a logger, changes cannot be made 
to allow all log events for a  [...]
 
-Since its first release Log4j has supported reconfiguration through a file.
-Beginning with Log4j 2.12.0 Log4j also supports accessing the configuration 
via HTTP(S) and monitoring the file  for changes by using the HTTP 
"If-Modified-Since" header.
-A patch has also been integrated into Spring Cloud Config starting with 
versions 2.0.3 and 2.1.1 for it to honor the If-Modified-Since header.
-In addition, the  log4j-spring-cloud-config project will listen for update 
events published by Spring Cloud Bus and then verify that the configuration 
file has been modified, so polling via HTTP is not required.
+Since its first release Log4j has supported reconfiguration through a file. 
Beginning with Log4j 2.12.0 Log4j also supports accessing the configuration via 
HTTP(S) and monitoring the file for changes by using the HTTP 
“If-Modified-Since” header. A patch has also been integrated into Spring Cloud 
Config starting with versions 2.0.3 and 2.1.1 for it to honor the 
If-Modified-Since header. In addition, the log4j-spring-cloud-config project 
will listen for update events published by Spring Cl [...]
 
-Log4j also supports composite configurations.
-A distributed application spread across microservices could  share a common 
configuration file that could be used to control things like enabling debug 
logging for a  specific user.
+Log4j also supports composite configurations. A distributed application spread 
across microservices could share a common configuration file that could be used 
to control things like enabling debug logging for a specific user.
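+
+As a sketch of how this can be wired up (the URL below is a placeholder, not a real endpoint), the
+configuration location may be given as a comma-separated list of files and URLs, which Log4j merges into a
+composite configuration:
+
+[source]
+----
+-Dlog4j2.configurationFile=classpath:log4j2.xml,https://config.example.com/shared/log4j2-overrides.xml
+----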
 
-While the standard Spring Boot REST endpoints to update logging will still 
work any changes made by those  REST endpoints will be lost if Log4j 
reconfigures itself do to changes in the logging configuration file.
+While the standard Spring Boot REST endpoints to update logging will still work, any changes made by those REST endpoints will be lost if Log4j reconfigures itself due to changes in the logging configuration file.
 
-Further information regarding integration of the 
log4j-spring-cloud-config-client can be found at  
link:../log4j-spring-cloud-config/log4j-spring-cloud-config-client/index.html[Log4j
 Spring Cloud Config Client].
+Further information regarding integration of the log4j-spring-cloud-config-client can be found in the Log4j Spring Cloud Config Client documentation.
 
-== Integration with Spring Boot
+=== Integration with Spring Boot
 
-Log4j integrates with Spring Boot in 2 ways:
+Log4j integrates with Spring Boot in two ways:
 
 . A Spring Lookup can be used to access the Spring application configuration 
from Log4j configuration files.
-. Log4j will access the Spring configuration when it is trying to resolve 
log4j system properties.
+. Log4j will access the Spring configuration when trying to resolve Log4j 
system properties.
 
 Both of these require that the log4j-spring-cloud-client jar is included in 
the application.
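+
+A sketch of the corresponding Maven dependency (the artifact name matches the log4j-spring-cloud-config-client
+module referenced above; verify the exact coordinates against the Log4j release you use):
+
+[source,xml]
+----
+<dependency>
+  <groupId>org.apache.logging.log4j</groupId>
+  <artifactId>log4j-spring-cloud-config-client</artifactId>
+  <version>${log4j.version}</version>
+</dependency>
+----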
 
-== Integration with Docker
+=== Integration with Docker
 
-Applications within a Docker container that log using a Docker logging driver 
can include special  attributes in the formatted log event as described at  
https://docs.docker.com/config/containers/logging/log_tags/[Customize Log 
Driver Output].
-Log4j  provides similar functionality via the 
link:lookups.html#DockerLookup[Docker Lookup].
-More information on Log4j's Docker support may also be found at 
link:../log4j-docker/index.html[Log4j-Docker].
+Applications within a Docker container that log using a Docker logging driver 
can include special attributes in the formatted log event as described at 
Customize Log Driver Output. Log4j provides similar functionality via the 
Docker Lookup. More information on Log4j's Docker support may also be found at 
Log4j-Docker.
 
-== Integration with Kubernetes
+=== Integration with Kubernetes
 
-Applications managed by Kubernetes can bypass the Docker/Kubernetes logging 
infrastructure and log directly to  either a sidecar forwarder or a logging 
aggragator cluster while still including all the kubernetes  attributes by 
using the Log4j 2 link:lookups.html#KubernetesLookup[Kubernetes Lookup].
-More information on Log4j's Kubernetes support may also be found at 
link:../log4j-kubernetes/index.html[Log4j-Kubernetes].
+Applications managed by Kubernetes can bypass the Docker/Kubernetes logging infrastructure and log directly to either a sidecar forwarder or a logging aggregator cluster while still including all the Kubernetes attributes by using the https://github.com/fabric8io/kubernetes-client/blob/main/doc/KubernetesLog4j.md[Kubernetes Log4j Lookup] maintained by the Fabric8 project.
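+
+A sketch of the Maven dependency for that lookup (the coordinates are assumed from the Fabric8 project and
+should be verified against the kubernetes-client documentation; the version is a placeholder):
+
+[source,xml]
+----
+<dependency>
+  <groupId>io.fabric8</groupId>
+  <artifactId>kubernetes-log4j</artifactId>
+  <version><!-- use the version matching your kubernetes-client --></version>
+</dependency>
+----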
 
 == Appender Performance
 
-The numbers in the table below represent how much time in seconds was required 
for the application to  call `+logger.debug(...)+` 100,000 times.
-These numbers only include the time taken to deliver to the specifically  
noted endpoint and many not include the actual time required before they are 
available for viewing.
-All  measurements were performed on a MacBook Pro with a 2.9GHz Intel Core I9 
processor with 6 physical and 12  logical cores, 32GB of 2400 MHz DDR4 RAM, and 
1TB of Apple SSD storage.
-The VM used by Docker was managed  by VMWare Fusion and had 4 CPUs and 2 GB of 
RAM.
-These number should be used for relative performance comparisons  as the 
results on another system may vary considerably.
-
-The sample application used can be found under the 
log4j-spring-cloud-config/log4j-spring-cloud-config-samples directory in the 
Log4j https://github.com/apache/logging-log4j2[source repository].
-
-[cols=",>,>,>,>"]
-|===
-| Test | 1 Thread | 2 Threads | 4 Threads | 8 Threads
-
-| Flume Avro
-|
-|
-|
-|
-
-| - Batch Size 1 - JSON
-| 49.11
-| 46.54
-| 46.70
-| 44.92
-
-| - Batch Size 1 - RFC5424
-| 48.30
-| 45.79
-| 46.31
-| 45.50
-
-| - Batch Size 100 - JSON
-| 6.33
-| 3.87
-| 3.57
-| 3.84
-
-| - Batch Size 100 - RFC5424
-| 6.08
-| 3.69
-| 3.22
-| 3.11
-
-| - Batch Size 1000 - JSON
-| 4.83
-| 3.20
-| 3.02
-| 2.11
-
-| - Batch Size 1000 - RFC5424
-| 4.70
-| 2.40
-| 2.37
-| 2.37
-
-| Flume Embedded
-|
-|
-|
-|
-
-| - RFC5424
-| 3.58
-| 2.10
-| 2.10
-| 2.70
-
-| - JSON
-| 4.20
-| 2.49
-| 3.53
-| 2.90
-
-| Kafka Local JSON
-|
-|
-|
-|
-
-| - sendSync true
-| 58.46
-| 38.55
-| 19.59
-| 19.01
-
-| - sendSync false
-| 9.8
-| 10.8
-| 12.23
-| 11.36
-
-| Console
-|
-|
-|
-|
-
-| - JSON / Kubernetes
-| 3.03
-| 3.11
-| 3.04
-| 2.51
-
-| - JSON
-| 2.80
-| 2.74
-| 2.54
-| 2.35
-
-| - Docker fluentd driver
-| 10.65
-| 9.92
-| 10.42
-| 10.27
-
-| Rolling File
-|
-|
-|
-|
-
-| - RFC5424
-| 1.65
-| 0.94
-| 1.22
-| 1.55
-
-| - JSON
-| 1.90
-| 0.95
-| 1.57
-| 1.94
-
-| TCP - Fluent Bit - JSON
-| 2.34
-| 2.167
-| 1.67
-| 2.50
-
-| Async Logger
-|
-|
-|
-|
-
-| - TCP - Fluent Bit - JSON
-| 0.90
-| 0.58
-| 0.36
-| 0.48
-
-| - Console - JSON
-| 0.83
-| 0.57
-| 0.55
-| 0.61
-
-| - Flume Avro - 1000 - JSON
-| 0.76
-| 0.37
-| 0.45
-| 0.68
-|===
+The numbers in the table below represent how much time in seconds was required for the application to call logger.debug(...) 100,000 times. These numbers only include the time taken to deliver to the specifically noted endpoint and may not include the actual time required before they are available for viewing. All measurements were performed on a MacBook Pro with a 2.9GHz Intel Core i9 processor with 6 physical and 12 logical cores, 32GB of 2400 MHz DDR4 RAM, and 1TB of Apple SSD storag [...]
+
+The sample application used can be found under the 
log4j-spring-cloud-config/log4j-spring-cloud-config-samples directory in the 
Log4j source repository.
+
+[options="header"]
+|===========================================================================
+| Test                        | 1 Thread | 2 Threads | 4 Threads | 8 Threads
+| Flume Avro                  |          |           |           |
+| - Batch Size 1 - JSON       | 49.11    | 46.54     | 46.70     | 44.92
+| - Batch Size 1 - RFC5424    | 48.30    | 45.79     | 46.31     | 45.50
+| - Batch Size 100 - JSON     | 6.33     | 3.87      | 3.57      | 3.84
+| - Batch Size 100 - RFC5424  | 6.08     | 3.69      | 3.22      | 3.11
+| - Batch Size 1000 - JSON    | 4.83     | 3.20      | 3.02      | 2.11
+| - Batch Size 1000 - RFC5424 | 4.70     | 2.40      | 2.37      | 2.37
+| Flume Embedded              |          |           |           |
+| - RFC5424                   | 3.58     | 2.10      | 2.10      | 2.70
+| - JSON                      | 4.20     | 2.49      | 3.53      | 2.90
+| Kafka Local JSON            |          |           |           |
+| - sendSync true             | 58.46    | 38.55     | 19.59     | 19.01
+| - sendSync false            | 9.8      | 10.8      | 12.23     | 11.36
+| Console                     |          |           |           |
+| - JSON / Kubernetes         | 3.03     | 3.11      | 3.04      | 2.51
+| - JSON                      | 2.80     | 2.74      | 2.54      | 2.35
+| - Docker fluentd driver     | 10.65    | 9.92      | 10.42     | 10.27
+| Rolling File                |          |           |           |
+| - RFC5424                   | 1.65     | 0.94      | 1.22      | 1.55
+| - JSON                      | 1.90     | 0.95      | 1.57      | 1.94
+| TCP - Fluent Bit - JSON     | 2.34     | 2.167     | 1.67      | 2.50
+| Async Logger                |          |           |           |
+| - TCP - Fluent Bit - JSON   | 0.90     | 0.58      | 0.36      | 0.48
+| - Console - JSON            | 0.83     | 0.57      | 0.55      | 0.61
+| - Flume Avro - 1000 - JSON  | 0.76     | 0.37      | 0.45      | 0.68
+|===========================================================================
 
 Notes:
 
-. Flume Avro - Buffering is controlled by the batch size.
-Each send is complete when the remote  acknowledges the batch was written to 
its channel.
-These number seem to indicate Flume Avro could benefit from using a pool of 
RPCClients, at least for a batchSize of 1.
-. Flume Embedded - This is essentially asynchronous as it writes to an 
in-memory buffer.
-It is unclear why the performance isn't closer to the AsyncLogger results.
-. Kafka was run in standalone mode on the same laptop as the application.
-See  sendSync set to true requires waiting for an ack from Kafka for each log 
event.
-. Console - System.out is redirected to a file by Docker.
-Testing shows that it would be much slower if it was writing to the terminal 
screen.
+. Flume Avro - Buffering is controlled by the batch size. Each send is 
complete when the remote
+acknowledges the batch was written to its channel. These numbers seem to indicate Flume Avro could
+benefit from using a pool of RPCClients, at least for a batchSize of 1.
+. Flume Embedded - This is essentially asynchronous as it writes to an 
in-memory buffer. It is
+unclear why the performance isn't closer to the AsyncLogger results.
+. Kafka was run in standalone mode on the same laptop as the application. Note that sendSync set to true
+requires waiting for an ack from Kafka for each log event.
+. Console - System.out is redirected to a file by Docker. Testing shows that 
it would be much
+slower if it was writing to the terminal screen.
 . Rolling File - Test uses the default buffer size of 8K.
 . TCP to Fluent Bit - The Socket Appender uses a default buffer size of 8K.
-. Async Loggers - These all write to a circular buffer and return to the 
application.
-The actual I/O will take place on a separate thread.
-If writing the events is performed more slowly than  events are being created 
eventually the buffer will fill up and logging will be performed at  the same 
pace that log events are written.
+. Async Loggers - These all write to a circular buffer and return to the 
application. The actual
+I/O will take place on a separate thread. If writing the events is performed 
more slowly than
+events are being created eventually the buffer will fill up and logging will 
be performed at
+the same pace that log events are written.
 
 == Logging Recommendations
 
-. Use asynchronous logging unless guaranteed delivery is absolutely required.
-As  the performance numbers show, so long as the volume of logging is not high 
enough to fill up the  circular buffer the overhead of logging will almost be 
unnoticeable to the application.
-. If overall performance is a consideration or you require multiline events 
such as stack traces be processed properly then log via TCP to a companion 
container that acts as a log forwarder or directly to a log aggregator as shown 
above in <<ELK,Logging with ELK>>.
-Use the + Log4j Docker Lookup to add the container information to each log 
event.
-. Whenever guaranteed delivery is required use Flume Avro with a batch size of 
1 or another Appender such  as the Kafka Appender with syncSend set to true 
that only return control after the downstream agent  acknowledges receipt of 
the event.
-Beware that using an Appender that writes each event individually should  be 
kept to a minimum since it is much slower than sending buffered events.
-. Logging to files within the container is discouraged.
-Doing so requires that a volume be declared in  the Docker configuration and 
that the file be tailed by a log forwarder.
-However, it performs  better than logging to the standard output stream.
-If logging via TCP is not an option and proper multiline handling is required 
then consider this option.
+. Use asynchronous logging unless guaranteed delivery is absolutely required. As
+the performance numbers show, so long as the volume of logging is not high enough to fill up the
+circular buffer, the overhead of logging will be almost unnoticeable to the application
+(see the sketch after this list for one way to enable asynchronous loggers).
+. If overall performance is a consideration or you require multiline events 
such as stack traces
+be processed properly then log via TCP to a companion container that acts as a 
log forwarder or directly
+to a log aggregator as shown above in xref:#ELK[Logging with ELK]. Use the
+Log4j Docker Lookup to add the container information to each log event.
+. Whenever guaranteed delivery is required use Flume Avro with a batch size of 
1 or another Appender such
+as the Kafka Appender with syncSend set to true that only return control after 
the downstream agent
+acknowledges receipt of the event. Beware that using an Appender that writes 
each event individually should
+be kept to a minimum since it is much slower than sending buffered events.
+. Logging to files within the container is discouraged. Doing so requires that 
a volume be declared in
+the Docker configuration and that the file be tailed by a log forwarder. 
However, it performs
+better than logging to the standard output stream. If logging via TCP is not 
an option and
+proper multiline handling is required then consider this option.
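+
+As a sketch of the synchronous Kafka case above, a Kafka Appender that waits for the broker to
+acknowledge each event might be configured as follows (the topic name, broker address, and layout
+are illustrative placeholders):
+
+[source,xml]
+----
+<!-- syncSend="true" blocks until Kafka acknowledges the record -->
+<Kafka name="Kafka" topic="app-log" syncSend="true">
+  <PatternLayout pattern="%d [%t] %-5p %c{1.} - %m%n"/>
+  <Property name="bootstrap.servers">kafka.example.org:9092</Property>
+</Kafka>
+----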
diff --git a/src/site/asciidoc/manual/lookups.adoc b/src/site/asciidoc/manual/lookups.adoc
index 8f72402794..a79bbbee6c 100644
--- a/src/site/asciidoc/manual/lookups.adoc
+++ b/src/site/asciidoc/manual/lookups.adoc
@@ -275,7 +275,13 @@ The JndiLookup allows variables to be retrieved via JNDI. By default the
 key will be prefixed with java:comp/env/, however if the key contains a
 ":" no prefix will be added.
 
-The JNDI Lookup only supports the java protocol or no protocol (as shown in 
the example below).
+By default the JNDI Lookup only supports the java, ldap, and ldaps protocols or no protocol.
+Additional protocols may be supported by specifying them on the ``log4j2.allowedJndiProtocols``
+property. When using LDAP, Java classes that implement the Referenceable interface are not
+supported for security reasons. Only the Java primitive classes are supported by default, as well
+as any classes specified by the ``log4j2.allowedLdapClasses`` property. When using LDAP, only
+references to the local host name or IP address are supported, along with any hosts or IP
+addresses listed in the ``log4j2.allowedLdapHosts`` property.
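+
+For example, an additional protocol, LDAP class, and LDAP host might be allowed with system
+properties such as the following (the values shown are purely illustrative):
+
+[source,properties]
+----
+# Illustrative values only
+log4j2.allowedJndiProtocols=dns
+log4j2.allowedLdapClasses=java.util.UUID
+log4j2.allowedLdapHosts=ldap.example.org
+----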
 
 [source,xml]
 ----
@@ -304,59 +310,7 @@ https://docs.oracle.com/javase/8/docs/api/java/lang/management/RuntimeMXBean.htm
 [#KubernetesLookup]
 == Kubernetes Lookup
 
-The KubernetesLookup can be used to lookup attributes from the Kubernetes 
environment for the container
-the application is running in.
-
-Log4j Kubernetes provides access to the following container attributes:
-[cols="1m,4a"]
-|===
-|Attribute |Description
-|accountName|The service account name
-|clusterName|The name of the cluster the application is deployed in
-|containerId|>The full id assigned to the container
-|containerName|The name assigned to the container
-|host|The name assigned to the host operating system
-|hostIp|The host's ip address
-|imageId|The id assigned to the container image
-|imageName|The name assigned to the container image
-|labels|All labels formatted in a list
-|labels.app|The application name
-|labels.podTemplateHash|The pod's template hash value
-|masterUrl|The URL used to access the API server
-|namespaceId|The id of the namespace the various kubernetes components are 
located within
-|namespaceName|The namespace the various kubernetes components are located 
within
-|podId|The pod's ip number
-|podIp|The pod's ip address
-|podName|The name of the pod
-|===
-
-[source,xml]
-----
-<GelfLayout includeStackTrace="true" host="${hostName}" 
includeThreadContext="true" includeNullDelimiter="true"
-                  compressionType="OFF">
-    
<ThreadContextIncludes>requestId,sessionId,loginId,userId,ipAddress,callingHost</ThreadContextIncludes>
-    <MessagePattern>%d [%t] %-5p %X{requestId, sessionId, loginId, userId, 
ipAddress} %C{1.}.%M:%L - %m%n</MessagePattern>
-    <KeyValuePair key="docker.containerId" value="${docker:containerId:-}"/>
-    <KeyValuePair key="application" 
value="$${lower:${spring:spring.application.name}}"/>
-    <KeyValuePair key="kubernetes.serviceAccountName" 
value="${k8s:accountName:-}"/>
-    <KeyValuePair key="kubernetes.clusterName" value="${k8s:clusterName:-}/>
-    <KeyValuePair key="kubernetes.containerId" value="${k8s:containerId:-}"/>
-    <KeyValuePair key="kubernetes.containerName" 
value="${k8s:containerName:-}"/>
-    <KeyValuePair key="kubernetes.host" value="${k8s:host:-}"/>
-    <KeyValuePair key="kubernetes.labels.app" value="${k8s:labels.app:-}"/>
-    <KeyValuePair key="kubernetes.labels.pod-template-hash" 
value="${k8s:labels.podTemplateHash:-}"/>
-    <KeyValuePair key="kubernetes.master_url" value="${k8s:masterUrl:-}"/>
-    <KeyValuePair key="kubernetes.namespaceId" value="${k8s:namespaceId:-}"/>
-    <KeyValuePair key="kubernetes.namespaceName" 
value="${k8s:namespaceName:-}"/>
-    <KeyValuePair key="kubernetes.podID" value="${k8s:podId:-}"/>
-    <KeyValuePair key="kubernetes.podIP" value="${k8s:podIp:-}"/>
-    <KeyValuePair key="kubernetes.podName" value="${k8s:podName:-}"/>
-    <KeyValuePair key="kubernetes.imageId" value="${k8s:imageId:-}"/>
-    <KeyValuePair key="kubernetes.imageName" value="${k8s:imageName:-}"/>
-</GelfLayout>
-----
-
-This Lookup is subject to the configuration requirements listed at 
link:../log4j-kubernetes/index.html[Log4j Kubernetes Support]
+For retrieving attributes using Fabric8's Kubernetes Client, see their
+https://github.com/fabric8io/kubernetes-client/blob/main/doc/KubernetesLog4j.md[Kubernetes Log4j Lookup].
 
 [#Log4jConfigLookup]
 == Log4j Configuration Location Lookup
@@ -429,6 +383,7 @@ For example, suppose the static void main String[] arguments are:
 
 Then the following substitutions are possible:
 
+[cols="m,m"]
 |===
 |Expression |Result
 
@@ -460,7 +415,6 @@ Then the following substitutions are possible:
 |true
 |===
 
-
 Example usage:
 
 [source,xml]
