This is an automated email from the ASF dual-hosted git repository.

oleewere pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
     new 4482cdc  AMBARI-24480. Upgrade Infra Solr (from Solr 7.3.1 to Solr 
7.4.0) (#2082)
4482cdc is described below

commit 4482cdcacc935632a1a8266923c01a2795203755
Author: Olivér Szabó <[email protected]>
AuthorDate: Thu Aug 16 11:14:57 2018 +0200

    AMBARI-24480. Upgrade Infra Solr (from Solr 7.3.1 to Solr 7.4.0) (#2082)
    
    * AMBARI-24480. Upload Infra Solr (from Solr 7.3.1 to Solr 7.4.0)
    
    * AMBARI-24480. Fix naming
---
 .../ambari-infra-manager/docker/docker-compose.yml |  2 +-
 .../docker/infra-manager-docker-compose.sh         |  2 +-
 ambari-infra/ambari-infra-solr-client/README.md    |  8 +-
 .../src/main/python/migrationHelper.py             |  2 +-
 .../src/main/resources/solrIndexHelper.sh          |  8 +-
 .../docker/infra-solr-docker-compose.sh            |  2 +-
 ambari-infra/pom.xml                               |  2 +-
 .../main/configsets/audit_logs/conf/solrconfig.xml |  2 +-
 .../configsets/hadoop_logs/conf/solrconfig.xml     |  2 +-
 .../main/configsets/history/conf/solrconfig.xml    |  2 +-
 ambari-logsearch/docker/Dockerfile                 |  2 +-
 ambari-logsearch/docker/docker-compose.yml         |  2 +-
 ambari-logsearch/docker/logsearch-docker.sh        |  2 +-
 ambari-logsearch/docker/solr.yml                   |  2 +-
 ambari-logsearch/docker/sso.yml                    |  2 +-
 ambari-logsearch/pom.xml                           |  2 +-
 .../ambari/server/upgrade/UpgradeCatalog271.java   | 99 ++++++++++++++++++++++
 .../0.1.0/configuration/infra-solr-log4j.xml       |  4 +-
 .../0.1.0/package/scripts/setup_infra_solr.py      |  2 +-
 .../0.1.0/properties/infra-solr-env.sh.j2          |  2 +-
 .../0.1.0/properties/solr-log4j2.xml.j2            | 74 ++++++++++++++++
 .../server/upgrade/UpgradeCatalog271Test.java      | 64 ++++++++++++++
 22 files changed, 263 insertions(+), 26 deletions(-)

diff --git a/ambari-infra/ambari-infra-manager/docker/docker-compose.yml 
b/ambari-infra/ambari-infra-manager/docker/docker-compose.yml
index d77205f..2369d85 100644
--- a/ambari-infra/ambari-infra-manager/docker/docker-compose.yml
+++ b/ambari-infra/ambari-infra-manager/docker/docker-compose.yml
@@ -27,7 +27,7 @@ services:
       ZOO_SERVERS: server.1=zookeeper:2888:3888
   solr:
 #  TODO: use infra-solr
-    image: solr:${SOLR_VERSION:-7.3.1}
+    image: solr:${SOLR_VERSION:-7.4.0}
     restart: always
     hostname: solr
     ports:
diff --git 
a/ambari-infra/ambari-infra-manager/docker/infra-manager-docker-compose.sh 
b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker-compose.sh
index c36d671..5271909 100755
--- a/ambari-infra/ambari-infra-manager/docker/infra-manager-docker-compose.sh
+++ b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker-compose.sh
@@ -72,7 +72,7 @@ AMBARI_LOCATION=$AMBARI_LOCATION
 ZOOKEEPER_VERSION=3.4.10
 ZOOKEEPER_CONNECTION_STRING=zookeeper:2181
 
-SOLR_VERSION=7.3.1
+SOLR_VERSION=7.4.0
 
 HADOOP_VERSION=3.0.0
 EOF
diff --git a/ambari-infra/ambari-infra-solr-client/README.md 
b/ambari-infra/ambari-infra-solr-client/README.md
index a14f92a..2b6d004 100644
--- a/ambari-infra/ambari-infra-solr-client/README.md
+++ b/ambari-infra/ambari-infra-solr-client/README.md
@@ -472,7 +472,7 @@ If the script finished successfully and everything looks 
green on Ambari UI as w
 Migration for `ranger_audits` collection (cores):
 
 ```bash
-# by default, you will mirate to Lucene 6.6.2, if you want to migrate again to 
Solr 7 (not requred), you can use --version 7.3.1 flag
+# by default, you will migrate to Lucene 6.6.2, if you want to migrate again to 
Solr 7 (not required), you can use --version 7.4.0 flag
 /usr/lib/ambari-infra-solr-client/migrationHelper.py --ini-file 
$CONFIG_INI_LOCATION --action migrate -s RANGER
 ```
 
@@ -487,7 +487,7 @@ infra-lucene-index-tool upgrade-index -d /tmp/ranger-backup 
-f -b -g
 # with 'infra-lucene-index-tool help' command you can checkout the command 
line options
 ```
 
-By default, the tool will migrate from lucene version 5 to lucene version 
6.6.2. (that's ok for Solr 7) If you want a lucene 7 index, you will need to 
re-run the migration tool command with `-v 7.3.1` option.
+By default, the tool will migrate from lucene version 5 to lucene version 
6.6.2. (that's ok for Solr 7) If you want a lucene 7 index, you will need to 
re-run the migration tool command with `-v 7.4.0` option.
 
 #### <a id="v/2.-migrate-atlas-collections">VI/2. Migrate Atlas collections</a>
 
@@ -509,7 +509,7 @@ infra-lucene-index-tool upgrade-index -d 
/tmp/fulltext_index_backup -f -b -g
 # with 'infra-lucene-index-tool help' command you can checkout the command 
line options
 ```
 
-By default, the tool will migrate from lucene version 5 to lucene version 
6.6.2. (that's ok for Solr 7) If you want a lucene 7 index, you will need to 
re-run the migration tool command with `-v 7.3.1` option.
+By default, the tool will migrate from lucene version 5 to lucene version 
6.6.2. (that's ok for Solr 7) If you want a lucene 7 index, you will need to 
re-run the migration tool command with `-v 7.4.0` option.
 
 ### <a id="vi.-restore-collections">VII. Restore Collections</a>
 
@@ -852,7 +852,7 @@ Options:
                         location of the index backups (for ranger). required
                         only if no backup path in the ini file
   --version=INDEX_VERSION
-                        lucene index version for migration (6.6.2 or 7.3.1)
+                        lucene index version for migration (6.6.2 or 7.4.0)
   --solr-async-request-tries=SOLR_ASYNC_REQUEST_TRIES
                         number of max tries for async Solr requests (e.g.:
                         delete operation)
diff --git 
a/ambari-infra/ambari-infra-solr-client/src/main/python/migrationHelper.py 
b/ambari-infra/ambari-infra-solr-client/src/main/python/migrationHelper.py
index 8cb103d..b2e835b 100755
--- a/ambari-infra/ambari-infra-solr-client/src/main/python/migrationHelper.py
+++ b/ambari-infra/ambari-infra-solr-client/src/main/python/migrationHelper.py
@@ -1911,7 +1911,7 @@ if __name__=="__main__":
   parser.add_option("--atlas-index-location", dest="atlas_index_location", 
type="string", help="location of the index backups (for atlas). required only 
if no backup path in the ini file")
   parser.add_option("--ranger-index-location", dest="ranger_index_location", 
type="string", help="location of the index backups (for ranger). required only 
if no backup path in the ini file")
 
-  parser.add_option("--version", dest="index_version", type="string", 
default="6.6.2", help="lucene index version for migration (6.6.2 or 7.3.1)")
+  parser.add_option("--version", dest="index_version", type="string", 
default="6.6.2", help="lucene index version for migration (6.6.2 or 7.4.0)")
   parser.add_option("--solr-async-request-tries", 
dest="solr_async_request_tries", type="int", default=400,  help="number of max 
tries for async Solr requests (e.g.: delete operation)")
   parser.add_option("--request-tries", dest="request_tries", type="int", 
help="number of tries for BACKUP/RESTORE status api calls in the request")
   parser.add_option("--request-time-interval", dest="request_time_interval", 
type="int", help="time interval between BACKUP/RESTORE status api calls in the 
request")
diff --git 
a/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh 
b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
index dfa96aa..5cd5b5f 100755
--- 
a/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
+++ 
b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
@@ -43,7 +43,7 @@ function print_help() {
      -b, --backup-enabled                    Use indexer tool with backup 
snapshots. (core filter won't be used)
      -g, --debug                             Enable debug mode, IndexUpgrader 
output will be verbose.
      -f, --force                             Force to start index upgrade, 
even is the version is at least 6.
-     -v, --version                           Lucene version to upgrade 
(default: 6.6.2, available: 6.6.2, 7.3.1)
+     -v, --version                           Lucene version to upgrade 
(default: 6.6.2, available: 6.6.2, 7.4.0)
 EOF
 }
 
@@ -51,7 +51,7 @@ function upgrade_core() {
   local INDEX_DIR=${1:?"usage: <index_base_dir> e.g.: 
/opt/ambari_infra_solr/data"}
   local FORCE_UPDATE=${2:?"usage <force_update_flag> e.g.: true"}
   local SOLR_CORE_FILTERS=${3:?"usage: <comma separated core filters> e.g.: 
hadoop_logs,audit_logs,history"}
-  local LUCENE_VERSION=${4:?"usage <lucene_index_version> e.g.: 7.3.1"}
+  local LUCENE_VERSION=${4:?"usage <lucene_index_version> e.g.: 7.4.0"}
   local BACKUP_MODE=${5:?"usage <backup_mode_enabled> e.g.: true"}
   local DEBUG_MODE=${6:?"usage <debug_mode> e.g.: true"}
   SOLR_CORE_FILTER_ARR=$(echo $SOLR_CORE_FILTERS | sed "s/,/ /g")
@@ -204,12 +204,12 @@ function upgrade_index() {
 
 function upgrade_index_tool() {
   # see: https://cwiki.apache.org/confluence/display/solr/IndexUpgrader+Tool
-  : ${INDEX_VERSION:?"Please set the INDEX_VERSION variable! (6.6.2 or 7.3.1)"}
+  : ${INDEX_VERSION:?"Please set the INDEX_VERSION variable! (6.6.2 or 7.4.0)"}
   PATH=$JAVA_HOME/bin:$PATH $JVM -classpath 
"$DIR/migrate/lucene-core-$INDEX_VERSION.jar:$DIR/migrate/lucene-backward-codecs-$INDEX_VERSION.jar"
 org.apache.lucene.index.IndexUpgrader ${@}
 }
 
 function check_index_tool() {
-  : ${INDEX_VERSION:?"Please set the INDEX_VERSION variable! (6.6.2 or 7.3.1)"}
+  : ${INDEX_VERSION:?"Please set the INDEX_VERSION variable! (6.6.2 or 7.4.0)"}
   PATH=$JAVA_HOME/bin:$PATH $JVM -classpath 
"$DIR/migrate/lucene-core-$INDEX_VERSION.jar:$DIR/migrate/lucene-backward-codecs-$INDEX_VERSION.jar"
 org.apache.lucene.index.CheckIndex ${@}
 }
 
diff --git 
a/ambari-infra/ambari-infra-solr-plugin/docker/infra-solr-docker-compose.sh 
b/ambari-infra/ambari-infra-solr-plugin/docker/infra-solr-docker-compose.sh
index 69d8e08..502d87a 100755
--- a/ambari-infra/ambari-infra-solr-plugin/docker/infra-solr-docker-compose.sh
+++ b/ambari-infra/ambari-infra-solr-plugin/docker/infra-solr-docker-compose.sh
@@ -72,7 +72,7 @@ AMBARI_LOCATION=$AMBARI_LOCATION
 ZOOKEEPER_VERSION=3.4.10
 ZOOKEEPER_CONNECTION_STRING=zookeeper:2181
 
-SOLR_VERSION=7.3.1
+SOLR_VERSION=7.4.0
 EOF
 }
 
diff --git a/ambari-infra/pom.xml b/ambari-infra/pom.xml
index 0b3e5a8..3934c96 100644
--- a/ambari-infra/pom.xml
+++ b/ambari-infra/pom.xml
@@ -25,7 +25,7 @@
 
   <properties>
     <jdk.version>1.8</jdk.version>
-    <solr.version>7.3.1</solr.version>
+    <solr.version>7.4.0</solr.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
diff --git 
a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
 
b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
index a75070e..8f54121 100644
--- 
a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
+++ 
b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
@@ -35,7 +35,7 @@
        that you fully re-index after changing this setting as it can
        affect both how text is indexed and queried.
   -->
-  <luceneMatchVersion>7.3.1</luceneMatchVersion>
+  <luceneMatchVersion>7.4.0</luceneMatchVersion>
 
   <!-- <lib/> directives can be used to instruct Solr to load any Jars
        identified and use them to resolve any "plugins" specified in
diff --git 
a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
 
b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
index 424ca89..67db2e1 100644
--- 
a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
+++ 
b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
@@ -35,7 +35,7 @@
        that you fully re-index after changing this setting as it can
        affect both how text is indexed and queried.
   -->
-  <luceneMatchVersion>7.3.1</luceneMatchVersion>
+  <luceneMatchVersion>7.4.0</luceneMatchVersion>
 
   <!-- <lib/> directives can be used to instruct Solr to load any Jars
        identified and use them to resolve any "plugins" specified in
diff --git 
a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
 
b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
index 56822e4..866b218 100644
--- 
a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
+++ 
b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
@@ -16,7 +16,7 @@
  limitations under the License.
 -->
 <config>
-  <luceneMatchVersion>7.3.1</luceneMatchVersion>
+  <luceneMatchVersion>7.4.0</luceneMatchVersion>
 
   <lib dir="${solr.install.dir:../../../..}/dist/" 
regex="solr-dataimporthandler-.*\.jar" />
 
diff --git a/ambari-logsearch/docker/Dockerfile 
b/ambari-logsearch/docker/Dockerfile
index ca6ac5e..d076565 100644
--- a/ambari-logsearch/docker/Dockerfile
+++ b/ambari-logsearch/docker/Dockerfile
@@ -54,7 +54,7 @@ RUN echo 'X11DisplayOffset 10\n' /etc/ssh/sshd_config
 RUN git config --global url."https://".insteadOf git://
 
 # Install Solr
-ENV SOLR_VERSION 7.3.1
+ENV SOLR_VERSION 7.4.0
 RUN wget --no-check-certificate -O /root/solr-$SOLR_VERSION.tgz 
http://public-repo-1.hortonworks.com/ARTIFACTS/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz
 RUN cd /root && tar -zxvf /root/solr-$SOLR_VERSION.tgz
 
diff --git a/ambari-logsearch/docker/docker-compose.yml 
b/ambari-logsearch/docker/docker-compose.yml
index b73ee5c..fb14622 100644
--- a/ambari-logsearch/docker/docker-compose.yml
+++ b/ambari-logsearch/docker/docker-compose.yml
@@ -26,7 +26,7 @@ services:
       ZOO_MY_ID: 1
       ZOO_SERVERS: server.1=zookeeper:2888:3888
   solr:
-    image: solr:${SOLR_VERSION:-7.3.1}
+    image: solr:${SOLR_VERSION:-7.4.0}
     restart: always
     hostname: solr
     ports:
diff --git a/ambari-logsearch/docker/logsearch-docker.sh 
b/ambari-logsearch/docker/logsearch-docker.sh
index 866ce41..72a332a 100755
--- a/ambari-logsearch/docker/logsearch-docker.sh
+++ b/ambari-logsearch/docker/logsearch-docker.sh
@@ -109,7 +109,7 @@ AMBARI_LOCATION=$AMBARI_LOCATION
 ZOOKEEPER_VERSION=3.4.10
 ZOOKEEPER_CONNECTION_STRING=zookeeper:2181
 
-SOLR_VERSION=7.3.1
+SOLR_VERSION=7.4.0
 EOF
     echo ".env file has been created. Check it out before starting Log Search. 
($sdir/.env)"
     exit
diff --git a/ambari-logsearch/docker/solr.yml b/ambari-logsearch/docker/solr.yml
index 59ac354..2975af6 100644
--- a/ambari-logsearch/docker/solr.yml
+++ b/ambari-logsearch/docker/solr.yml
@@ -15,7 +15,7 @@
 version: '3.3'
 services:
   solr:
-    image: solr:${SOLR_VERSION:-7.3.1}
+    image: solr:${SOLR_VERSION:-7.4.0}
     restart: always
     networks:
       - logsearch-network
diff --git a/ambari-logsearch/docker/sso.yml b/ambari-logsearch/docker/sso.yml
index 311e448..0837dd8 100644
--- a/ambari-logsearch/docker/sso.yml
+++ b/ambari-logsearch/docker/sso.yml
@@ -26,7 +26,7 @@ services:
       ZOO_MY_ID: 1
       ZOO_SERVERS: server.1=zookeeper:2888:3888
   solr:
-    image: solr:${SOLR_VERSION:-7.3.1}
+    image: solr:${SOLR_VERSION:-7.4.0}
     restart: always
     hostname: solr
     ports:
diff --git a/ambari-logsearch/pom.xml b/ambari-logsearch/pom.xml
index f1d269a..276a707 100644
--- a/ambari-logsearch/pom.xml
+++ b/ambari-logsearch/pom.xml
@@ -45,7 +45,7 @@
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
     <deb.architecture>amd64</deb.architecture>
     <deb.dependency.list>${deb.python.ver}</deb.dependency.list>
-    <solr.version>7.3.1</solr.version>
+    <solr.version>7.4.0</solr.version>
     <hadoop.version>3.0.0</hadoop.version>
     <common.io.version>2.5</common.io.version>
     <zookeeper.version>3.4.6.2.3.0.0-2557</zookeeper.version>
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog271.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog271.java
index 854b358..ddb7541 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog271.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog271.java
@@ -21,9 +21,11 @@ import static 
org.apache.ambari.server.upgrade.UpgradeCatalog270.AMBARI_INFRA_NE
 import static 
org.apache.ambari.server.upgrade.UpgradeCatalog270.AMBARI_INFRA_OLD_NAME;
 
 import java.sql.SQLException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -54,6 +56,64 @@ public class UpgradeCatalog271 extends 
AbstractUpgradeCatalog {
    */
   private static final Logger LOG = 
LoggerFactory.getLogger(UpgradeCatalog271.class);
 
+  private static final String SOLR_NEW_LOG4J2_XML = "<Configuration>\n" +
+    "  <Appenders>\n" +
+    "\n" +
+    "    <Console name=\"STDOUT\" target=\"SYSTEM_OUT\">\n" +
+    "      <PatternLayout>\n" +
+    "        <Pattern>\n" +
+    "          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} 
%X{core}] %C (%F:%L) - %m%n\n" +
+    "        </Pattern>\n" +
+    "      </PatternLayout>\n" +
+    "    </Console>\n" +
+    "\n" +
+    "    <RollingFile\n" +
+    "        name=\"RollingFile\"\n" +
+    "        fileName=\"{{infra_solr_log_dir}}/solr.log\"\n" +
+    "        filePattern=\"{{infra_solr_log_dir}}/solr.log.%i\" >\n" +
+    "      <PatternLayout>\n" +
+    "        <Pattern>\n" +
+    "          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} 
%X{core}] %C (%F:%L) - %m%n\n" +
+    "        </Pattern>\n" +
+    "      </PatternLayout>\n" +
+    "      <Policies>\n" +
+    "        <OnStartupTriggeringPolicy />\n" +
+    "        <SizeBasedTriggeringPolicy size=\"{{infra_log_maxfilesize}} 
MB\"/>\n" +
+    "      </Policies>\n" +
+    "      <DefaultRolloverStrategy max=\"{{infra_log_maxbackupindex}}\"/>\n" +
+    "    </RollingFile>\n" +
+    "\n" +
+    "    <RollingFile\n" +
+    "        name=\"SlowFile\"\n" +
+    "        fileName=\"{{infra_solr_log_dir}}/solr_slow_requests.log\"\n" +
+    "        filePattern=\"{{infra_solr_log_dir}}/solr_slow_requests.log.%i\" 
>\n" +
+    "      <PatternLayout>\n" +
+    "        <Pattern>\n" +
+    "          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} 
%X{core}] %C (%F:%L) - %m%n\n" +
+    "        </Pattern>\n" +
+    "      </PatternLayout>\n" +
+    "      <Policies>\n" +
+    "        <OnStartupTriggeringPolicy />\n" +
+    "        <SizeBasedTriggeringPolicy size=\"{{infra_log_maxfilesize}} 
MB\"/>\n" +
+    "      </Policies>\n" +
+    "      <DefaultRolloverStrategy max=\"{{infra_log_maxbackupindex}}\"/>\n" +
+    "    </RollingFile>\n" +
+    "\n" +
+    "  </Appenders>\n" +
+    "  <Loggers>\n" +
+    "    <Logger name=\"org.apache.hadoop\" level=\"warn\"/>\n" +
+    "    <Logger name=\"org.apache.solr.update.LoggingInfoStream\" 
level=\"off\"/>\n" +
+    "    <Logger name=\"org.apache.zookeeper\" level=\"warn\"/>\n" +
+    "    <Logger name=\"org.apache.solr.core.SolrCore.SlowRequest\" 
level=\"warn\" additivity=\"false\">\n" +
+    "      <AppenderRef ref=\"SlowFile\"/>\n" +
+    "    </Logger>\n" +
+    "\n" +
+    "    <Root level=\"warn\">\n" +
+    "      <AppenderRef ref=\"RollingFile\"/>\n" +
+    "      <!-- <AppenderRef ref=\"STDOUT\"/> -->\n" +
+    "    </Root>\n" +
+    "  </Loggers>\n" +
+    "</Configuration>";
   private static final String SERVICE_CONFIG_MAPPING_TABLE = 
"serviceconfigmapping";
   private static final String CLUSTER_CONFIG_TABLE = "clusterconfig";
   protected static final String CLUSTERS_TABLE = "clusters";
@@ -114,6 +174,7 @@ public class UpgradeCatalog271 extends 
AbstractUpgradeCatalog {
     updateRangerKmsDbUrl();
     renameAmbariInfraInConfigGroups();
     removeLogSearchPatternConfigs();
+    updateSolrConfigurations();
   }
 
   /**
@@ -270,4 +331,42 @@ public class UpgradeCatalog271 extends 
AbstractUpgradeCatalog {
         new 
DBAccessor.DBColumnInfo(CLUSTERS_BLUEPRINT_PROVISIONING_STATE_COLUMN, 
String.class, 255,
             BlueprintProvisioningState.NONE, true));
   }
+
+  /**
+   * Upgrade lucene version to 7.4.0 in Solr config of Log Search collections 
and Solr Log4j config
+   */
+  protected void updateSolrConfigurations() throws AmbariException {
+    AmbariManagementController ambariManagementController = 
injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters == null)
+      return;
+
+    Map<String, Cluster> clusterMap = clusters.getClusters();
+
+    if (clusterMap == null || clusterMap.isEmpty())
+      return;
+
+    for (final Cluster cluster : clusterMap.values()) {
+      updateConfig(cluster, "logsearch-service_logs-solrconfig", (content) -> 
updateLuceneMatchVersion(content,"7.4.0"));
+      updateConfig(cluster, "logsearch-audit_logs-solrconfig", (content) -> 
updateLuceneMatchVersion(content,"7.4.0"));
+      updateConfig(cluster, "infra-solr-log4j", (content) -> 
SOLR_NEW_LOG4J2_XML);
+    }
+  }
+
+  private void updateConfig(Cluster cluster, String configType, 
Function<String, String> contentUpdater) throws AmbariException {
+    Config config = cluster.getDesiredConfigByType(configType);
+    if (config == null)
+      return;
+    if (config.getProperties() == null || 
!config.getProperties().containsKey("content"))
+      return;
+
+    String content = config.getProperties().get("content");
+    content = contentUpdater.apply(content);
+    updateConfigurationPropertiesForCluster(cluster, configType, 
Collections.singletonMap("content", content), true, true);
+  }
+
+  private String updateLuceneMatchVersion(String content, String 
newLuceneMatchVersion) {
+    return content.replaceAll("<luceneMatchVersion>.*</luceneMatchVersion>",
+      "<luceneMatchVersion>" + newLuceneMatchVersion + 
"</luceneMatchVersion>");
+  }
 }
\ No newline at end of file
diff --git 
a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-log4j.xml
 
b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-log4j.xml
index e797b37..daed2cd 100644
--- 
a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-log4j.xml
+++ 
b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-log4j.xml
@@ -44,11 +44,11 @@
   <property>
     <name>content</name>
     <display-name>infra-solr-log4j template</display-name>
-    <description>This is the jinja template for log4j.properties</description>
+    <description>This is the jinja template for log4j2.xml</description>
     <value/>
     <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
     <value-attributes>
-      <property-file-name>solr-log4j.properties.j2</property-file-name>
+      <property-file-name>solr-log4j2.xml.j2</property-file-name>
       <property-file-type>text</property-file-type>
     </value-attributes>
     <on-ambari-upgrade add="false"/>
diff --git 
a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/setup_infra_solr.py
 
b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/setup_infra_solr.py
index b6055ea..5690084 100644
--- 
a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/setup_infra_solr.py
+++ 
b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/setup_infra_solr.py
@@ -67,7 +67,7 @@ def setup_infra_solr(name = None):
          group=params.user_group
          )
 
-    File(format("{infra_solr_conf}/log4j.properties"),
+    File(format("{infra_solr_conf}/log4j2.xml"),
          content=InlineTemplate(params.solr_log4j_content),
          owner=params.infra_solr_user,
          group=params.user_group
diff --git 
a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/infra-solr-env.sh.j2
 
b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/infra-solr-env.sh.j2
index 0ca8522..524a96f 100644
--- 
a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/infra-solr-env.sh.j2
+++ 
b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/infra-solr-env.sh.j2
@@ -73,7 +73,7 @@ SOLR_HOME={{infra_solr_datadir}}
 # Solr provides a default Log4J configuration properties file in 
server/resources
 # however, you may want to customize the log settings and file appender 
location
 # so you can point the script to use a different log4j.properties file
-LOG4J_PROPS={{infra_solr_conf}}/log4j.properties
+LOG4J_PROPS={{infra_solr_conf}}/log4j2.xml
 
 # Location where Solr should write logs to; should agree with the file appender
 # settings in server/resources/log4j.properties
diff --git 
a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-log4j2.xml.j2
 
b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-log4j2.xml.j2
new file mode 100644
index 0000000..5b87f3d
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-log4j2.xml.j2
@@ -0,0 +1,74 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<Configuration>
+  <Appenders>
+
+    <Console name="STDOUT" target="SYSTEM_OUT">
+      <PatternLayout>
+        <Pattern>
+          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} 
%X{core}] %C (%F:%L) - %m%n
+        </Pattern>
+      </PatternLayout>
+    </Console>
+
+    <RollingFile
+        name="RollingFile"
+        fileName="{{infra_solr_log_dir}}/solr.log"
+        filePattern="{{infra_solr_log_dir}}/solr.log.%i" >
+      <PatternLayout>
+        <Pattern>
+          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} 
%X{core}] %C (%F:%L) - %m%n
+        </Pattern>
+      </PatternLayout>
+      <Policies>
+        <OnStartupTriggeringPolicy />
+        <SizeBasedTriggeringPolicy size="{{infra_log_maxfilesize}} MB"/>
+      </Policies>
+      <DefaultRolloverStrategy max="{{infra_log_maxbackupindex}}"/>
+    </RollingFile>
+
+    <RollingFile
+        name="SlowFile"
+        fileName="{{infra_solr_log_dir}}/solr_slow_requests.log"
+        filePattern="{{infra_solr_log_dir}}/solr_slow_requests.log.%i" >
+      <PatternLayout>
+        <Pattern>
+          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} 
%X{core}] %C (%F:%L) - %m%n
+        </Pattern>
+      </PatternLayout>
+      <Policies>
+        <OnStartupTriggeringPolicy />
+        <SizeBasedTriggeringPolicy size="{{infra_log_maxfilesize}} MB"/>
+      </Policies>
+      <DefaultRolloverStrategy max="{{infra_log_maxbackupindex}}"/>
+    </RollingFile>
+
+  </Appenders>
+  <Loggers>
+    <Logger name="org.apache.hadoop" level="warn"/>
+    <Logger name="org.apache.solr.update.LoggingInfoStream" level="off"/>
+    <Logger name="org.apache.zookeeper" level="warn"/>
+    <Logger name="org.apache.solr.core.SolrCore.SlowRequest" level="warn" 
additivity="false">
+      <AppenderRef ref="SlowFile"/>
+    </Logger>
+
+    <Root level="warn">
+      <AppenderRef ref="RollingFile"/>
+      <!-- <AppenderRef ref="STDOUT"/> -->
+    </Root>
+  </Loggers>
+</Configuration>
\ No newline at end of file
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog271Test.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog271Test.java
index 63247da..5bf1317 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog271Test.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog271Test.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.upgrade;
 
 import static 
org.apache.ambari.server.upgrade.UpgradeCatalog271.CLUSTERS_BLUEPRINT_PROVISIONING_STATE_COLUMN;
 import static 
org.apache.ambari.server.upgrade.UpgradeCatalog271.CLUSTERS_TABLE;
+import static org.easymock.EasyMock.anyBoolean;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
@@ -41,6 +42,7 @@ import java.util.Map;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.DaoUtils;
 import org.apache.ambari.server.state.BlueprintProvisioningState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -92,6 +94,7 @@ public class UpgradeCatalog271Test {
     Method updateRangerKmsDbUrl = 
UpgradeCatalog271.class.getDeclaredMethod("updateRangerKmsDbUrl");
     Method renameAmbariInfraInConfigGroups = 
UpgradeCatalog271.class.getDeclaredMethod("renameAmbariInfraInConfigGroups");
     Method removeLogSearchPatternConfigs = 
UpgradeCatalog271.class.getDeclaredMethod("removeLogSearchPatternConfigs");
+    Method updateSolrConfigurations = 
UpgradeCatalog271.class.getDeclaredMethod("updateSolrConfigurations");
 
     UpgradeCatalog271 upgradeCatalog271 = 
createMockBuilder(UpgradeCatalog271.class)
       .addMockedMethod(updateRangerKmsDbUrl)
@@ -99,6 +102,7 @@ public class UpgradeCatalog271Test {
       .addMockedMethod(addNewConfigurationsFromXml)
       .addMockedMethod(renameAmbariInfraInConfigGroups)
       .addMockedMethod(removeLogSearchPatternConfigs)
+      .addMockedMethod(updateSolrConfigurations)
       .createMock();
 
     upgradeCatalog271.addNewConfigurationsFromXml();
@@ -116,6 +120,9 @@ public class UpgradeCatalog271Test {
     upgradeCatalog271.removeLogSearchPatternConfigs();
     expectLastCall().once();
 
+    upgradeCatalog271.updateSolrConfigurations();
+    expectLastCall().once();
+
     replay(upgradeCatalog271);
     upgradeCatalog271.executeDMLUpdates();
     verify(upgradeCatalog271);
@@ -292,4 +299,61 @@ public class UpgradeCatalog271Test {
     
Assert.assertEquals(updatedRangerKmsEnvConfig.get("ranger_kms_privelege_user_jdbc_url"),
 "jdbc:mysql://c6401.ambari.apache.org:3546");
   }
 
+  @Test
+  public void testUpdateSolrConfigurations() throws Exception {
+    // GIVEN
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+
+    Config mockedServiceLogSolrConfig = 
easyMockSupport.createNiceMock(Config.class);
+    Config mockedAudiitLogSolrConfig = 
easyMockSupport.createNiceMock(Config.class);
+    Config mockedSolrLog4JConfig = 
easyMockSupport.createNiceMock(Config.class);
+
+    Map<String, Config> allDummy = new HashMap<>();
+
+    Map<String, String> serviceLogProps = new HashMap<>();
+    serviceLogProps.put("content", 
"<luceneMatchVersion>7.3.1</luceneMatchVersion>");
+    Map<String, String> auditLogProps = new HashMap<>();
+    auditLogProps.put("content", 
"<luceneMatchVersion>7.3.1</luceneMatchVersion>");
+    Map<String, String> solrLog4jProps = new HashMap<>();
+    solrLog4jProps.put("content", "log4jContent");
+
+    Injector injector = easyMockSupport.createNiceMock(Injector.class);
+    AmbariManagementControllerImpl controller = 
createMockBuilder(AmbariManagementControllerImpl.class)
+      .addMockedMethod("createConfiguration")
+      .addMockedMethod("getClusters", new Class[] { })
+      .addMockedMethod("createConfig")
+      .createNiceMock();
+
+    DaoUtils daoUtilsMock = easyMockSupport.createNiceMock(DaoUtils.class);
+    Map<String, Cluster> clusterMap = new HashMap<>();
+    clusterMap.put("cl1", cluster);
+    
expect(injector.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    
expect(injector.getInstance(DaoUtils.class)).andReturn(daoUtilsMock).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(clusters.getClusters()).andReturn(clusterMap).anyTimes();
+    
expect(cluster.getDesiredConfigByType("logsearch-service_logs-solrconfig")).andReturn(mockedServiceLogSolrConfig);
+    
expect(cluster.getDesiredConfigByType("logsearch-audit_logs-solrconfig")).andReturn(mockedAudiitLogSolrConfig);
+    
expect(cluster.getDesiredConfigByType("infra-solr-log4j")).andReturn(mockedSolrLog4JConfig);
+    
expect(mockedServiceLogSolrConfig.getProperties()).andReturn(serviceLogProps).anyTimes();
+    
expect(mockedAudiitLogSolrConfig.getProperties()).andReturn(auditLogProps).anyTimes();
+    
expect(mockedSolrLog4JConfig.getProperties()).andReturn(solrLog4jProps).anyTimes();
+    // WHEN
+    replay(daoUtilsMock, controller, injector, clusters, cluster, 
mockedServiceLogSolrConfig, mockedAudiitLogSolrConfig, mockedSolrLog4JConfig);
+    UpgradeCatalog271 underTest = createMockBuilder(UpgradeCatalog271.class)
+      .withConstructor(Injector.class)
+      .withArgs(injector)
+      .addMockedMethod("updateConfigurationPropertiesForCluster", 
Cluster.class, String.class, Map.class, boolean.class, boolean.class)
+      .createNiceMock();
+    
underTest.updateConfigurationPropertiesForCluster(anyObject(Cluster.class), 
anyString(), anyObject(), anyBoolean(), anyBoolean());
+    expectLastCall().times(3);
+    replay(underTest);
+    underTest.updateSolrConfigurations();
+    // THEN
+    easyMockSupport.verifyAll();
+  }
+
+
 }

Reply via email to