This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 8849561  HDDS-3453. Use UrlConnectionFactory to handle HTTP Client SPNEGO for … (#903)
8849561 is described below

commit 884956103873db466505cccd7900161232e79b55
Author: Xiaoyu Yao <[email protected]>
AuthorDate: Thu May 14 09:36:12 2020 -0700

    HDDS-3453. Use UrlConnectionFactory to handle HTTP Client SPNEGO for … (#903)
---
 .../scm/TestStorageContainerManagerHttpServer.java |   1 +
 .../test.sh => ozonesecure-om-ha/.env}             |  31 +---
 .../compose/ozonesecure-om-ha/docker-compose.yaml  | 201 +++++++++++++++++++++
 .../main/compose/ozonesecure-om-ha/docker-config   | 125 +++++++++++++
 .../{ozonesecure => ozonesecure-om-ha}/test.sh     |  17 +-
 .../dist/src/main/compose/ozonesecure/test.sh      |   2 +
 .../dist/src/main/smoketest/spnego/web.robot       |  65 +++++++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |   3 +
 .../hadoop/ozone/om/OzoneManagerHttpServer.java    |   5 +-
 .../om/snapshot/OzoneManagerSnapshotProvider.java  | 165 ++++++-----------
 .../ozone/om/TestOzoneManagerHttpServer.java       |   1 +
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  |  14 +-
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |  58 +++---
 .../apache/hadoop/ozone/recon/TestReconUtils.java  |   5 +-
 .../impl/TestOzoneManagerServiceProviderImpl.java  |   5 +-
 .../apache/hadoop/ozone/freon/OmKeyGenerator.java  |   8 +-
 16 files changed, 514 insertions(+), 192 deletions(-)
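
In short: the OM snapshot provider and Recon used to download OM DB checkpoints
with Apache HttpClient, which was not wired into Hadoop's Kerberos machinery.
Both now go through org.apache.hadoop.hdfs.web.URLConnectionFactory, which can
run the SPNEGO handshake on request. A minimal sketch of the pattern the patch
adopts (the URL and timeout values below are placeholders, not taken from the
patch):

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.web.URLConnectionFactory;

    public class SpnegoGetSketch {
      public static void main(String[] args) throws Exception {
        int connectTimeoutMs = 5000;  // placeholders; the patch reads these
        int readTimeoutMs = 5000;     // from ozone-site.xml timeout keys
        URLConnectionFactory factory = URLConnectionFactory
            .newDefaultURLConnectionFactory(connectTimeoutMs, readTimeoutMs,
                new Configuration());
        // The boolean asks the factory to run the SPNEGO handshake with the
        // caller's Kerberos credentials.
        HttpURLConnection conn = (HttpURLConnection) factory
            .openConnection(new URL("http://om:9874/dbCheckpoint"), true);
        conn.connect();
        System.out.println("HTTP " + conn.getResponseCode());
        factory.destroy();  // mirrors the cleanup added in stop()/tearDown()
      }
    }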

diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 24c1449..c21788a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -90,6 +90,7 @@ public class TestStorageContainerManagerHttpServer {
   }
 
   @AfterClass public static void tearDown() throws Exception {
+    connectionFactory.destroy();
     FileUtil.fullyDelete(new File(BASEDIR));
     KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
   }
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/.env
old mode 100755
new mode 100644
similarity index 60%
copy from hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
copy to hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/.env
index 8b5441a..37227ac
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/.env
@@ -1,4 +1,3 @@
-#!/usr/bin/env bash
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -15,30 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-export SECURITY_ENABLED=true
-
-start_docker_env
-
-execute_robot_test scm kinit.robot
-
-execute_robot_test scm basic
-
-execute_robot_test scm security
-
-execute_robot_test scm ozonefs/ozonefs.robot
-
-execute_robot_test s3g s3
-
-execute_robot_test scm admincli
-
-execute_robot_test scm recon
-
-stop_docker_env
-
-generate_report
+HDDS_VERSION=${hdds.version}
+HADOOP_VERSION=3
+OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-compose.yaml
new file mode 100644
index 0000000..ba0bd19
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-compose.yaml
@@ -0,0 +1,201 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+  kdc:
+    build:
+      context: ../common/docker-image/docker-krb5
+      dockerfile: Dockerfile-krb5
+    image: ozone-insecure-krb5
+    hostname: kdc
+    volumes:
+      - ../..:/opt/hadoop
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.100
+  kms:
+    image: apache/hadoop:${HADOOP_VERSION}
+    ports:
+      - 9600:9600
+    env_file:
+      - ./docker-config
+    command: ["hadoop", "kms"]
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.101
+  datanode1:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9864:9999
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - docker-config
+    environment:
+      KERBEROS_KEYTABS: dn HTTP
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.102
+  datanode2:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9866:9999
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - docker-config
+    environment:
+      KERBEROS_KEYTABS: dn HTTP
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.103
+  datanode3:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9868:9999
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - docker-config
+    environment:
+      KERBEROS_KEYTABS: dn HTTP
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.104
+  om1:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    hostname: om1
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9880:9874
+      - 9890:9872
+      #- 18001:18001
+    environment:
+      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
+      KERBEROS_KEYTABS: om HTTP
+      #HADOOP_OPTS: "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=*:18001"
+    env_file:
+      - ./docker-config
+    command: ["/opt/hadoop/bin/ozone","om"]
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.111
+  om2:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    hostname: om2
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9882:9874
+      - 9892:9872
+      #- 18002:18002
+    environment:
+      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
+      KERBEROS_KEYTABS: om HTTP
+      #HADOOP_OPTS: "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=*:18002"
+    env_file:
+      - ./docker-config
+    command: ["/opt/hadoop/bin/ozone","om"]
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.112
+  om3:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    hostname: om3
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9884:9874
+      - 9894:9872
+      #- 18003:18003
+    environment:
+      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
+      KERBEROS_KEYTABS: om HTTP
+      #HADOOP_OPTS: "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=*:18003"
+    env_file:
+      - ./docker-config
+    command: ["/opt/hadoop/bin/ozone","om"]
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.113
+  s3g:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    hostname: s3g
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9878:9878
+    env_file:
+      - ./docker-config
+    command: ["/opt/hadoop/bin/ozone","s3g"]
+    environment:
+      KERBEROS_KEYTABS: s3g HTTP testuser
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.114
+  recon:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    hostname: recon
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9888:9888
+      #- 18000:18000
+    env_file:
+      - ./docker-config
+    environment:
+      KERBEROS_KEYTABS: recon HTTP
+      #HADOOP_OPTS: "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=*:18000"
+    command: ["/opt/hadoop/bin/ozone","recon"]
+    extra_hosts:
+      - "om1: 172.25.0.111"
+      - "om2: 172.25.0.112"
+      - "om3: 172.25.0.113"
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.115
+  scm:
+    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    hostname: scm
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9876:9876
+    env_file:
+      - docker-config
+    environment:
+      KERBEROS_KEYTABS: scm HTTP testuser testuser2
+      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+      OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-3}"
+    command: ["/opt/hadoop/bin/ozone","scm"]
+    extra_hosts:
+      - "om1: 172.25.0.111"
+      - "om2: 172.25.0.112"
+      - "om3: 172.25.0.113"
+    networks:
+      ozone_net:
+        ipv4_address: 172.25.0.116
+networks:
+  ozone_net:
+    ipam:
+      driver: default
+      config:
+        - subnet: "172.25.0.0/24"
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config
new file mode 100644
index 0000000..e245b7e
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config
@@ -0,0 +1,125 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
+CORE-SITE.XML_fs.defaultFS=o3fs://bucket.volume.id1
+OZONE-SITE.XML_ozone.om.service.ids=id1
+OZONE-SITE.XML_ozone.om.internal.service.id=id1
+OZONE-SITE.XML_ozone.om.nodes.id1=om1,om2,om3
+OZONE-SITE.XML_ozone.om.address.id1.om1=om1
+OZONE-SITE.XML_ozone.om.address.id1.om2=om2
+OZONE-SITE.XML_ozone.om.address.id1.om3=om3
+OZONE-SITE.XML_ozone.om.http-address.id1.om1=om1
+OZONE-SITE.XML_ozone.om.http-address.id1.om2=om2
+OZONE-SITE.XML_ozone.om.http-address.id1.om3=om3
+OZONE-SITE.XML_ozone.om.ratis.enable=true
+
+OZONE-SITE.XML_ozone.om.volume.listall.allowed=false
+
+OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.handler.type=distributed
+OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.block.token.enabled=true
+OZONE-SITE.XML_ozone.replication=3
+
+OZONE-SITE.XML_recon.om.snapshot.task.interval.delay=1m
+OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
+OZONE-SITE.XML_recon.om.snapshot.task.initial.delay=20s
+OZONE-SITE.XML_ozone.recon.address=recon:9891
+
+OZONE-SITE.XML_ozone.security.enabled=true
+OZONE-SITE.XML_ozone.acl.enabled=true
+OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer
+OZONE-SITE.XML_ozone.administrators="testuser/[email protected],testuser/[email protected]"
+
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
+HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
+HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
+CORE-SITE.XML_dfs.data.transfer.protection=authentication
+CORE-SITE.XML_hadoop.security.authentication=kerberos
+CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*)s/.*/root/
+CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms
+
+
+OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/[email protected]
+OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
+OZONE-SITE.XML_ozone.om.kerberos.principal=om/[email protected]
+OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
+OZONE-SITE.XML_ozone.recon.kerberos.keytab.file=/etc/security/keytabs/recon.keytab
+OZONE-SITE.XML_ozone.recon.kerberos.principal=recon/[email protected]
+
+HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/[email protected]
+HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
+HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/[email protected]
+HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+
+
+OZONE-SITE.XML_ozone.security.http.kerberos.enabled=true
+OZONE-SITE.XML_ozone.http.filter.initializers=org.apache.hadoop.security.AuthenticationFilterInitializer
+
+OZONE-SITE.XML_ozone.om.http.auth.type=kerberos
+OZONE-SITE.XML_hdds.scm.http.auth.type=kerberos
+OZONE-SITE.XML_hdds.datanode.http.auth.type=kerberos
+OZONE-SITE.XML_ozone.s3g.http.auth.type=kerberos
+OZONE-SITE.XML_ozone.recon.http.auth.type=kerberos
+
+OZONE-SITE.XML_hdds.scm.http.auth.kerberos.principal=HTTP/[email protected]
+OZONE-SITE.XML_hdds.scm.http.auth.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.om.http.auth.kerberos.principal=HTTP/[email protected]
+OZONE-SITE.XML_ozone.om.http.auth.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.principal=HTTP/[email protected]
+OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.principal=HTTP/[email protected]
+OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=HTTP/[email protected]
+OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+
+CORE-SITE.XML_hadoop.http.authentication.simple.anonymous.allowed=false
+CORE-SITE.XML_hadoop.http.authentication.signature.secret.file=/etc/security/http_secret
+CORE-SITE.XML_hadoop.http.authentication.type=kerberos
+CORE-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/[email protected]
+CORE-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+
+
+CORE-SITE.XML_hadoop.security.authorization=true
+HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=*
+HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=*
+HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=*
+HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=*
+HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=*
+
+HDFS-SITE.XML_rpc.metrics.quantile.enable=true
+HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
+
+#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
+#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+OZONE_DATANODE_SECURE_USER=root
+SECURITY_ENABLED=true
+KEYTAB_DIR=/etc/security/keytabs
+KERBEROS_KEYSTORES=hadoop
+KERBEROS_SERVER=kdc
+JAVA_HOME=/usr/lib/jvm/jre
+JSVC_HOME=/usr/bin
+SLEEP_SECONDS=5
+KERBEROS_ENABLED=true
+
+no_proxy=om,scm,recon,s3g,kdc,localhost,127.0.0.1
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/test.sh
similarity index 84%
copy from hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
copy to hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/test.sh
index 8b5441a..8893ef6 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/test.sh
@@ -18,26 +18,17 @@
 COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 export COMPOSE_DIR
 
+export SECURITY_ENABLED=true
+export OM_SERVICE_ID="id1"
+
 # shellcheck source=/dev/null
 source "$COMPOSE_DIR/../testlib.sh"
 
-export SECURITY_ENABLED=true
-
 start_docker_env
 
 execute_robot_test scm kinit.robot
 
-execute_robot_test scm basic
-
-execute_robot_test scm security
-
-execute_robot_test scm ozonefs/ozonefs.robot
-
-execute_robot_test s3g s3
-
-execute_robot_test scm admincli
-
-execute_robot_test scm recon
+execute_robot_test scm freon
 
 stop_docker_env
 
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index 8b5441a..d1bdd0d 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -39,6 +39,8 @@ execute_robot_test scm admincli
 
 execute_robot_test scm recon
 
+execute_robot_test scm spnego
+
 stop_docker_env
 
 generate_report
diff --git a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot
new file mode 100644
index 0000000..9c4156f
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot
@@ -0,0 +1,65 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Smoke test for SPNEGO with docker-compose environments.
+Library             OperatingSystem
+Library             String
+Library             BuiltIn
+Resource            ../commonlib.robot
+Test Timeout        5 minutes
+
+*** Variables ***
+${OM_URL}       http://om:9874
+${OM_DB_CHECKPOINT_URL}       http://om:9874/dbCheckpoint
+${OM_SERVICE_LIST_URL}       http://om:9874/serviceList
+
+${SCM_URL}       http://scm:9876
+${RECON_URL}       http://recon:9888
+
+*** Keywords ***
+Verify SPNEGO enabled URL
+    [arguments]                      ${url}
+    Run Keyword if      '${SECURITY_ENABLED}' == 'true'     Execute     kdestroy
+    ${result} =         Execute                             curl --negotiate -u : -v -s -I ${url}
+    Should contain      ${result}       401 Unauthorized
+
+    Run Keyword if      '${SECURITY_ENABLED}' == 'true'     Kinit test user     testuser     testuser.keytab
+    ${result} =         Execute                             curl --negotiate -u : -v -s -I ${url}
+    Should contain      ${result}       200 OK
+
+
+
+*** Test Cases ***
+Generate Freon data
+    Run Keyword if      '${SECURITY_ENABLED}' == 'true'     Kinit test user     testuser     testuser.keytab
+                        Execute                             ozone freon rk --replicationType=RATIS --numOfVolumes 1 --numOfBuckets 1 --numOfKeys 2 --keySize 1025
+
+Test OM portal
+    Verify SPNEGO enabled URL       ${OM_URL}
+
+Test OM DB Checkpoint
+    Verify SPNEGO enabled URL       ${OM_DB_CHECKPOINT_URL}
+
+Test OM Service List
+    Verify SPNEGO enabled URL       ${OM_SERVICE_LIST_URL}
+
+Test SCM portal
+    Verify SPNEGO enabled URL       ${SCM_URL}
+
+Test Recon portal
+    Verify SPNEGO enabled URL       ${RECON_URL}
+
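
The keyword above encodes the contract under test: an SPNEGO-protected endpoint
answers 401 Unauthorized when no ticket is present (after kdestroy) and 200 OK
once the client has kinit'ed. A rough Java equivalent of that probe, using the
factory introduced by this patch (the URL is a placeholder):

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.web.URLConnectionFactory;

    public class SpnegoProbe {
      public static void main(String[] args) throws Exception {
        URLConnectionFactory factory = URLConnectionFactory
            .newDefaultURLConnectionFactory(new Configuration());
        boolean spnego = args.length > 0 && Boolean.parseBoolean(args[0]);
        HttpURLConnection conn = (HttpURLConnection) factory
            .openConnection(new URL("http://om:9874"), spnego);
        conn.connect();
        // Expect 401 without negotiation or a ticket, 200 after kinit.
        System.out.println("HTTP " + conn.getResponseCode());
        factory.destroy();
      }
    }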
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index f1318fd..5b0cfe9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -1307,6 +1307,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       if (jvmPauseMonitor != null) {
         jvmPauseMonitor.stop();
       }
+      if (omSnapshotProvider != null) {
+        omSnapshotProvider.stop();
+      }
       omState = State.STOPPED;
     } catch (Exception e) {
       LOG.error("OzoneManager stop failed.", e);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
index 9fcd85e..cd1c085 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
@@ -34,10 +34,9 @@ public class OzoneManagerHttpServer extends BaseHttpServer {
   public OzoneManagerHttpServer(ConfigurationSource conf, OzoneManager om)
       throws IOException {
     super(conf, "ozoneManager");
-    // TODO: change back to addServlet when HDDS-3453 is fixed.
-    addInternalServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT,
+    addServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT,
         ServiceListJSONServlet.class);
-    addInternalServlet("dbCheckpoint", OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT,
+    addServlet("dbCheckpoint", OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT,
         OMDBCheckpointServlet.class);
     getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om);
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
index 9fc1c46..8fcf4e9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.ozone.om.snapshot;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.HashMap;
@@ -31,8 +33,10 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.server.http.HttpConfig;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
 
 import static java.net.HttpURLConnection.HTTP_CREATED;
@@ -40,21 +44,13 @@ import static java.net.HttpURLConnection.HTTP_OK;
 import org.apache.commons.io.FileUtils;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_TERM;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_AUTH_TYPE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY;
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.util.EntityUtils;
+
+import org.apache.hadoop.security.SecurityUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -70,8 +66,8 @@ public class OzoneManagerSnapshotProvider {
   private final File omSnapshotDir;
   private Map<String, OMNodeDetails> peerNodesMap;
   private final HttpConfig.Policy httpPolicy;
-  private final RequestConfig httpRequestConfig;
-  private CloseableHttpClient httpClient;
+  private final boolean spnegoEnabled;
+  private final URLConnectionFactory connectionFactory;
 
   private static final String OM_SNAPSHOT_DB = "om.snapshot.db";
 
@@ -87,16 +83,8 @@ public class OzoneManagerSnapshotProvider {
     }
 
     this.httpPolicy = HttpConfig.getHttpPolicy(conf);
-    this.httpRequestConfig = getHttpRequestConfig(conf);
-  }
-
-  private RequestConfig getHttpRequestConfig(ConfigurationSource conf) {
-    TimeUnit socketTimeoutUnit =
-        OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getUnit();
-    int socketTimeoutMS = (int) conf.getTimeDuration(
-        OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY,
-        OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getDuration(),
-        socketTimeoutUnit);
+    this.spnegoEnabled = conf.get(OZONE_OM_HTTP_AUTH_TYPE, "simple")
+        .equals("kerberos");
 
     TimeUnit connectionTimeoutUnit =
         OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getUnit();
@@ -112,36 +100,9 @@ public class OzoneManagerSnapshotProvider {
         OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getDuration(),
         requestTimeoutUnit);
 
-    RequestConfig requestConfig = RequestConfig.custom()
-        .setSocketTimeout(socketTimeoutMS)
-        .setConnectTimeout(connectionTimeoutMS)
-        .setConnectionRequestTimeout(requestTimeoutMS)
-        .build();
-
-    return requestConfig;
-  }
-
-  /**
-   * Create and return http client object.
-   */
-  private HttpClient getHttpClient() {
-    if (httpClient == null) {
-      httpClient = HttpClientBuilder
-          .create()
-          .setDefaultRequestConfig(httpRequestConfig)
-          .build();
-    }
-    return httpClient;
-  }
-
-  /**
-   * Close http client object.
-   */
-  private void closeHttpClient() throws IOException {
-    if (httpClient != null) {
-      httpClient.close();
-      httpClient = null;
-    }
+    connectionFactory = URLConnectionFactory
+      .newDefaultURLConnectionFactory(connectionTimeoutMS, requestTimeoutMS,
+            LegacyHadoopConfigurationSource.asHadoopConfiguration(conf));
   }
 
   /**
@@ -159,61 +120,55 @@ public class OzoneManagerSnapshotProvider {
 
     LOG.info("Downloading latest checkpoint from Leader OM {}. Checkpoint " +
         "URL: {}", leaderOMNodeID, omCheckpointUrl);
-
-    try {
-      HttpGet httpGet = new HttpGet(omCheckpointUrl);
-      HttpResponse response = getHttpClient().execute(httpGet);
-      int errorCode = response.getStatusLine().getStatusCode();
-      HttpEntity entity = response.getEntity();
-
-      if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-
-        Header header = response.getFirstHeader(OM_RATIS_SNAPSHOT_INDEX);
-        if (header == null) {
-          throw new IOException("The HTTP response header " +
-              OM_RATIS_SNAPSHOT_INDEX + " is missing.");
-        }
-
-        long snapshotIndex = Long.parseLong(header.getValue());
-
-        header = response.getFirstHeader(OM_RATIS_SNAPSHOT_TERM);
-        if (header == null) {
-          throw new IOException("The HTTP response header " +
-              OM_RATIS_SNAPSHOT_TERM + " is missing.");
-        }
-
-        long snapshotTerm = Long.parseLong(header.getValue());
-
-        try (InputStream inputStream = entity.getContent()) {
-          FileUtils.copyInputStreamToFile(inputStream, targetFile);
-        }
-
-        // Untar the checkpoint file.
-        Path untarredDbDir = Paths.get(omSnapshotDir.getAbsolutePath(),
-            snapshotFileName);
-        FileUtil.unTar(targetFile, untarredDbDir.toFile());
-        FileUtils.deleteQuietly(targetFile);
-
-        LOG.info("Sucessfully downloaded latest checkpoint with snapshot " +
-            "index {} from leader OM: {}",  snapshotIndex, leaderOMNodeID);
-
-        RocksDBCheckpoint omCheckpoint = new RocksDBCheckpoint(untarredDbDir);
-        omCheckpoint.setRatisSnapshotIndex(snapshotIndex);
-        omCheckpoint.setRatisSnapshotTerm(snapshotTerm);
-        return omCheckpoint;
-      }
-
-      if (entity != null) {
+    final long[] snapshotIndex = new long[1];
+    final long[] snapshotTerm = new long[1];
+    SecurityUtil.doAsCurrentUser(() -> {
+      HttpURLConnection httpURLConnection = (HttpURLConnection)
+          connectionFactory.openConnection(new URL(omCheckpointUrl),
+              spnegoEnabled);
+      httpURLConnection.connect();
+      int errorCode = httpURLConnection.getResponseCode();
+      if ((errorCode != HTTP_OK) && (errorCode != HTTP_CREATED)) {
         throw new IOException("Unexpected exception when trying to reach " +
             "OM to download latest checkpoint. Checkpoint URL: " +
-            omCheckpointUrl + ". Entity: " + EntityUtils.toString(entity));
-      } else {
-        throw new IOException("Unexpected null in http payload, while " +
-            "processing request to OM to download latest checkpoint. " +
-            "Checkpoint Url: " + omCheckpointUrl);
+            omCheckpointUrl + ". ErrorCode: " + errorCode);
+      }
+      snapshotIndex[0] = httpURLConnection.getHeaderFieldLong(
+          OM_RATIS_SNAPSHOT_INDEX, -1);
+      if (snapshotIndex[0] == -1) {
+        throw new IOException("The HTTP response header " +
+            OM_RATIS_SNAPSHOT_INDEX + " is missing.");
+      }
+      snapshotTerm[0] = httpURLConnection.getHeaderFieldLong(
+          OM_RATIS_SNAPSHOT_TERM, -1);
+      if (snapshotTerm[0] == -1) {
+        throw new IOException("The HTTP response header " +
+            OM_RATIS_SNAPSHOT_TERM + " is missing.");
       }
-    } finally {
-      closeHttpClient();
+
+      try (InputStream inputStream = httpURLConnection.getInputStream()) {
+        FileUtils.copyInputStreamToFile(inputStream, targetFile);
+      }
+      return null;
+    });
+    // Untar the checkpoint file.
+    Path untarredDbDir = Paths.get(omSnapshotDir.getAbsolutePath(),
+        snapshotFileName);
+    FileUtil.unTar(targetFile, untarredDbDir.toFile());
+    FileUtils.deleteQuietly(targetFile);
+
+    LOG.info("Sucessfully downloaded latest checkpoint with snapshot " +
+        "index {} from leader OM: {}", snapshotIndex[0], leaderOMNodeID);
+
+    RocksDBCheckpoint omCheckpoint = new RocksDBCheckpoint(untarredDbDir);
+    omCheckpoint.setRatisSnapshotIndex(snapshotIndex[0]);
+    omCheckpoint.setRatisSnapshotTerm(snapshotTerm[0]);
+    return omCheckpoint;
+  }
+
+  public void stop() {
+    if (connectionFactory != null) {
+      connectionFactory.destroy();
     }
   }
 }
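
Two details in the rewritten download path are easy to miss. The one-element
long[] arrays exist because the values are assigned inside the
PrivilegedExceptionAction lambda, which can only capture effectively final
locals. And getHeaderFieldLong returns the supplied default when the header is
absent, so -1 doubles as the missing-header sentinel. The idiom in isolation
(a hypothetical helper, not part of the patch):

    import java.io.IOException;
    import java.net.HttpURLConnection;

    final class RequiredHeader {
      /** Read a required numeric header; -1 is reserved as "missing". */
      static long get(HttpURLConnection conn, String name) throws IOException {
        long value = conn.getHeaderFieldLong(name, -1);
        if (value == -1) {
          throw new IOException("The HTTP response header " + name
              + " is missing.");
        }
        return value;
      }
    }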
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
index c7a8f20..2d0a72b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
@@ -88,6 +88,7 @@ public class TestOzoneManagerHttpServer {
   }
 
   @AfterClass public static void tearDown() throws Exception {
+    connectionFactory.destroy();
     FileUtil.fullyDelete(new File(BASEDIR));
     KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
   }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index a29a35e..cba7428 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -44,6 +44,8 @@ import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
 import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig;
 import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR;
+
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -214,17 +216,17 @@ public class ReconUtils {
 
   /**
    * Make HTTP GET call on the URL and return inputstream to the response.
-   * @param httpClient HttpClient to use.
+   * @param connectionFactory URLConnectionFactory to use.
    * @param url url to call
+   * @param isSpnego is SPNEGO enabled
    * @return Inputstream to the response of the HTTP call.
-   * @throws IOException While reading the response.
+   * @throws IOException, AuthenticationException While reading the response.
    */
   public InputStream makeHttpCall(URLConnectionFactory connectionFactory,
-                                  String url)
-      throws IOException {
-
+                                  String url, boolean isSpnego)
+      throws IOException, AuthenticationException {
     URLConnection urlConnection =
-          connectionFactory.openConnection(new URL(url));
+          connectionFactory.openConnection(new URL(url), isSpnego);
     urlConnection.connect();
     return urlConnection.getInputStream();
   }
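
Call sites opt in to SPNEGO explicitly through the new boolean. A minimal usage
sketch (placeholder URL, error handling elided):

    import java.io.InputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.web.URLConnectionFactory;
    import org.apache.hadoop.ozone.recon.ReconUtils;

    public class MakeHttpCallSketch {
      public static void main(String[] args) throws Exception {
        URLConnectionFactory factory = URLConnectionFactory
            .newDefaultURLConnectionFactory(new Configuration());
        try (InputStream in = new ReconUtils()
            .makeHttpCall(factory, "http://om:9874/dbCheckpoint", true)) {
          // consume the checkpoint stream here
          System.out.println("connected, " + in.available() + " bytes buffered");
        } finally {
          factory.destroy();
        }
      }
    }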
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
index b17944a..94c9520 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
@@ -51,12 +51,14 @@ import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
 import org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler;
 import org.apache.hadoop.ozone.recon.tasks.OMUpdateEventBatch;
 import org.apache.hadoop.ozone.recon.tasks.ReconTaskController;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.FileUtils;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_AUTH_TYPE;
 import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_REQUEST_TIMEOUT;
@@ -68,11 +70,6 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPS
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SOCKET_TIMEOUT;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SOCKET_TIMEOUT_DEFAULT;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
 import static org.apache.ratis.proto.RaftProtos.RaftPeerRole.LEADER;
 import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
 import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
@@ -94,7 +91,6 @@ public class OzoneManagerServiceProviderImpl
       LoggerFactory.getLogger(OzoneManagerServiceProviderImpl.class);
   private URLConnectionFactory connectionFactory;
 
-  private final CloseableHttpClient httpClient;
   private File omSnapshotDBParentDir = null;
   private String omDBSnapshotUrl;
 
@@ -124,8 +120,17 @@ public class OzoneManagerServiceProviderImpl
       ReconUtils reconUtils,
       OzoneManagerProtocol ozoneManagerClient) {
 
+    int connectionTimeout = (int) configuration.getTimeDuration(
+        RECON_OM_CONNECTION_TIMEOUT,
+        RECON_OM_CONNECTION_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
+    int connectionRequestTimeout = (int)configuration.getTimeDuration(
+        RECON_OM_CONNECTION_REQUEST_TIMEOUT,
+        RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
+
     connectionFactory =
-        URLConnectionFactory.newDefaultURLConnectionFactory(configuration);
+        URLConnectionFactory.newDefaultURLConnectionFactory(connectionTimeout,
+            connectionRequestTimeout, configuration);
+
     String ozoneManagerHttpAddress = configuration.get(OMConfigKeys
         .OZONE_OM_HTTP_ADDRESS_KEY);
 
@@ -137,26 +142,6 @@ public class OzoneManagerServiceProviderImpl
 
     HttpConfig.Policy policy = HttpConfig.getHttpPolicy(configuration);
 
-    int socketTimeout = (int) configuration.getTimeDuration(
-        RECON_OM_SOCKET_TIMEOUT, RECON_OM_SOCKET_TIMEOUT_DEFAULT,
-            TimeUnit.MILLISECONDS);
-    int connectionTimeout = (int) configuration.getTimeDuration(
-        RECON_OM_CONNECTION_TIMEOUT,
-        RECON_OM_CONNECTION_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-    int connectionRequestTimeout = (int)configuration.getTimeDuration(
-        RECON_OM_CONNECTION_REQUEST_TIMEOUT,
-        RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-
-    RequestConfig config = RequestConfig.custom()
-        .setConnectTimeout(socketTimeout)
-        .setConnectionRequestTimeout(connectionTimeout)
-        .setSocketTimeout(connectionRequestTimeout).build();
-
-    httpClient = HttpClientBuilder
-        .create()
-        .setDefaultRequestConfig(config)
-        .build();
-
     omDBSnapshotUrl = "http://" + ozoneManagerHttpAddress +
         OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
 
@@ -246,6 +231,7 @@ public class OzoneManagerServiceProviderImpl
     omMetadataManager.stop();
     scheduler.shutdownNow();
     metrics.unRegister();
+    connectionFactory.destroy();
   }
 
   /**
@@ -276,6 +262,11 @@ public class OzoneManagerServiceProviderImpl
     return omLeaderUrl;
   }
 
+  private boolean isOmSpnegoEnabled() {
+    return configuration.get(OZONE_OM_HTTP_AUTH_TYPE, "simple")
+        .equals("kerberos");
+  }
+
   /**
    * Method to obtain current OM DB Snapshot.
    * @return DBCheckpoint instance.
@@ -287,11 +278,14 @@ public class OzoneManagerServiceProviderImpl
     File targetFile = new File(omSnapshotDBParentDir, snapshotFileName +
         ".tar.gz");
     try {
-      try (InputStream inputStream = reconUtils.makeHttpCall(connectionFactory,
-          getOzoneManagerSnapshotUrl())) {
-        FileUtils.copyInputStreamToFile(inputStream, targetFile);
-      }
-
+      SecurityUtil.doAsLoginUser(() -> {
+        try (InputStream inputStream = reconUtils.makeHttpCall(
+            connectionFactory, getOzoneManagerSnapshotUrl(),
+            isOmSpnegoEnabled())) {
+          FileUtils.copyInputStreamToFile(inputStream, targetFile);
+        }
+        return null;
+      });
       // Untar the checkpoint file.
       Path untarredDbDir = Paths.get(omSnapshotDBParentDir.getAbsolutePath(),
           snapshotFileName);
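
Note the asymmetry with the OM-side change above: Recon wraps the download in
SecurityUtil.doAsLoginUser, i.e. it authenticates as its Kerberos login
(service) user, while the snapshot provider on the OM follower uses
doAsCurrentUser. The idiom in isolation (body elided):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.SecurityUtil;

    public class DoAsLoginUserSketch {
      public static void main(String[] args) throws Exception {
        // Runs the action with the login user's Kerberos credentials so the
        // SPNEGO handshake inside can find a ticket.
        SecurityUtil.doAsLoginUser((PrivilegedExceptionAction<Void>) () -> {
          // open the checkpoint URL and stream it to disk here
          return null;
        });
      }
    }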
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
index 6dea85b..0951299 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -148,10 +149,10 @@ public class TestReconUtils {
         mock(URLConnectionFactory.class);
     URLConnection urlConnectionMock = mock(URLConnection.class);
     when(urlConnectionMock.getInputStream()).thenReturn(fileInputStream);
-    when(connectionFactoryMock.openConnection(any(URL.class)))
+    when(connectionFactoryMock.openConnection(any(URL.class), anyBoolean()))
         .thenReturn(urlConnectionMock);
     try (InputStream inputStream = new ReconUtils()
-        .makeHttpCall(connectionFactoryMock, url)) {
+        .makeHttpCall(connectionFactoryMock, url, false)) {
       contents = IOUtils.toString(inputStream, Charset.defaultCharset());
     }
 
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
index 9b698fa..e989914 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
@@ -31,6 +31,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.doNothing;
@@ -112,7 +113,7 @@ public class TestOzoneManagerServiceProviderImpl {
     File tarFile = createTarFile(checkpoint.getCheckpointLocation());
     InputStream inputStream = new FileInputStream(tarFile);
     ReconUtils reconUtilsMock = getMockReconUtils();
-    when(reconUtilsMock.makeHttpCall(any(), anyString()))
+    when(reconUtilsMock.makeHttpCall(any(), anyString(), anyBoolean()))
         .thenReturn(inputStream);
 
     ReconTaskController reconTaskController = getMockTaskController();
@@ -163,7 +164,7 @@ public class TestOzoneManagerServiceProviderImpl {
     File tarFile = createTarFile(checkpointDir.toPath());
     InputStream fileInputStream = new FileInputStream(tarFile);
     ReconUtils reconUtilsMock = getMockReconUtils();
-    when(reconUtilsMock.makeHttpCall(any(), anyString()))
+    when(reconUtilsMock.makeHttpCall(any(), anyString(), anyBoolean()))
         .thenReturn(fileInputStream);
 
     ReconOMMetadataManager reconOMMetadataManager =
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java
index a0a135b..8c37659 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java
@@ -27,12 +27,16 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 
 import com.codahale.metrics.Timer;
+import org.apache.hadoop.security.UserGroupInformation;
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Option;
 
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
+
 /**
  * Data generator tool test om performance.
  */
@@ -99,7 +103,7 @@ public class OmKeyGenerator extends BaseFreonGenerator
   }
 
   private void createKey(long counter) throws Exception {
-
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     OmKeyArgs keyArgs = new Builder()
         .setBucketName(bucketName)
         .setVolumeName(volumeName)
@@ -107,6 +111,8 @@ public class OmKeyGenerator extends BaseFreonGenerator
         .setFactor(factor)
         .setKeyName(generateObjectName(counter))
         .setLocationInfoList(new ArrayList<>())
+        .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(),
+            ALL, ALL))
         .build();
 
     timer.time(() -> {
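
The freon change is a follow-on: keys are now created with ACLs derived from
the caller's UGI, which keeps OmKeyGenerator working against the new compose
cluster where ozone.acl.enabled=true. The construction in isolation (a sketch):

    import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;

    import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    public class AclListSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // Full rights for the creating user and the user's groups.
        System.out.println(OzoneAclUtil.getAclList(
            ugi.getUserName(), ugi.getGroups(), ALL, ALL));
      }
    }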

