This is an automated email from the ASF dual-hosted git repository.
alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git
The following commit(s) were added to refs/heads/master by this push:
new fa6d5a2 [mini_cluster] small fix on Mini{Chronyd,Hms}::Start()
fa6d5a2 is described below
commit fa6d5a228e7d27e7bc8749d425596fecd8c8ca1e
Author: Alexey Serbin <[email protected]>
AuthorDate: Tue Oct 1 21:54:21 2019 -0700
[mini_cluster] small fix on Mini{Chronyd,Hms}::Start()
This patch makes it possible to start ExternalMiniCluster on a directory
structure originally created by an earlier ExternalMiniCluster, provided
the same set of master RPC endpoints is used in both cases.
This functionality is useful for test scenarios like
MasterMigrationTest.TestEndToEndMigration, where a Kudu master catalog
is preserved between the cluster's restarts.
The motivation for this patch was seeing the mentioned test scenario
failing if running with the built-in NTP. That's because
ExternalMiniCluster does the ephemeral port binding, and then hands off
the port to mini_chronyd. Since the port binding is ephemeral, the next
start usually gets a different port bound. However, upon the next start,
mini_chronyd wasn't recreating the configuration file for chronyd
as necessary in MiniChronyd::Start(). I also found that running
the scenario with HMS enabled would fail as well, so I updated
MiniHms::Start() accordingly.
In addition, I updated the ExternalMiniClusterTest.TestBasicOperation
test scenario to cover the updated functionality.
Change-Id: I014eac84386c4d6e4f025afc1082e6a229a97454
Reviewed-on: http://gerrit.cloudera.org:8080/14345
Tested-by: Alexey Serbin <[email protected]>
Reviewed-by: Alexey Serbin <[email protected]>
---
src/kudu/clock/test/mini_chronyd.cc | 4 +-
src/kudu/hms/mini_hms.cc | 13 ++-
src/kudu/hms/mini_hms.h | 5 +-
.../mini-cluster/external_mini_cluster-test.cc | 121 +++++++++++++--------
4 files changed, 90 insertions(+), 53 deletions(-)
diff --git a/src/kudu/clock/test/mini_chronyd.cc
b/src/kudu/clock/test/mini_chronyd.cc
index ecb9624..7f1d09f 100644
--- a/src/kudu/clock/test/mini_chronyd.cc
+++ b/src/kudu/clock/test/mini_chronyd.cc
@@ -184,15 +184,15 @@ Status MiniChronyd::Start() {
VLOG(1) << "starting chronyd: " << options_.ToString();
if (!Env::Default()->FileExists(options_.data_root)) {
- VLOG(1) << "creating chronyd configuration file";
RETURN_NOT_OK(Env::Default()->CreateDir(options_.data_root));
// The chronyd's implementation puts strict requirements on the ownership
// of the directories where the runtime data is stored. In some
environments
// (e.g., macOS), the group owner of the newly created directory might be
// different from the user account's GID.
RETURN_NOT_OK(CorrectOwnership(options_.data_root));
- RETURN_NOT_OK(CreateConf());
}
+ VLOG(1) << "creating chronyd configuration file";
+ RETURN_NOT_OK(CreateConf());
// Start the chronyd in server-only mode, not detaching from terminal
// since the Subprocess needs to have the process running in foreground
diff --git a/src/kudu/hms/mini_hms.cc b/src/kudu/hms/mini_hms.cc
index 1174229..ad63331 100644
--- a/src/kudu/hms/mini_hms.cc
+++ b/src/kudu/hms/mini_hms.cc
@@ -158,13 +158,15 @@ Status MiniHms::Start() {
{ "HADOOP_OS_TYPE", "Linux" }
};
- if (!schema_initialized_) {
- // Run the schematool to initialize the database.
+ // Run the schematool to initialize the database if not yet initialized.
+ // Instead of running slow 'schematool -dbType derby -info' to check whether
+ // the database has been created already, a faster way is to check whether
+ // Derby's database sub-directory exists.
+ if (!Env::Default()->FileExists(JoinPathSegments(data_root_,
metadb_subdir_))) {
RETURN_NOT_OK(Subprocess::Call({Substitute("$0/bin/schematool", hive_home),
"-dbType", "derby", "-initSchema"}, "",
nullptr, nullptr,
env_vars));
- schema_initialized_ = true;
}
// Start the HMS.
@@ -271,7 +273,7 @@ Status MiniHms::CreateHiveSite() const {
<property>
<name>javax.jdo.option.ConnectionURL</name>
- <value>jdbc:derby:$2/metadb;create=true</value>
+ <value>jdbc:derby:$2/$9;create=true</value>
</property>
<property>
@@ -383,7 +385,8 @@ Status MiniHms::CreateHiveSite() const {
service_principal_,
SaslProtection::name_of(protection_),
JoinPathSegments(data_root_,
"hive-log4j2.properties"),
- sentry_properties);
+ sentry_properties,
+ metadb_subdir_);
if (IsAuthorizationEnabled()) {
// - hive.sentry.server
diff --git a/src/kudu/hms/mini_hms.h b/src/kudu/hms/mini_hms.h
index af30bdc..666d05a 100644
--- a/src/kudu/hms/mini_hms.h
+++ b/src/kudu/hms/mini_hms.h
@@ -115,6 +115,8 @@ class MiniHms {
uint16_t port_ = 0;
std::string data_root_;
+ // Sub-directory under 'data_root_' where Derby keeps its database.
+ std::string metadb_subdir_ = "metadb";
// Kerberos configuration
std::string krb5_conf_;
@@ -130,9 +132,6 @@ class MiniHms {
// Whether to enable the Kudu listener plugin.
bool enable_kudu_plugin_ = true;
-
- // Whether the Hive metastore schema has been initialized.
- bool schema_initialized_ = false;
};
} // namespace hms
diff --git a/src/kudu/mini-cluster/external_mini_cluster-test.cc
b/src/kudu/mini-cluster/external_mini_cluster-test.cc
index 8b3acb3..66faa32 100644
--- a/src/kudu/mini-cluster/external_mini_cluster-test.cc
+++ b/src/kudu/mini-cluster/external_mini_cluster-test.cc
@@ -18,6 +18,7 @@
#include "kudu/mini-cluster/external_mini_cluster.h"
#include <iosfwd>
+#include <memory>
#include <ostream>
#include <string>
#include <utility>
@@ -42,6 +43,7 @@
using std::string;
using std::tuple;
+using std::unique_ptr;
using std::vector;
using strings::Substitute;
@@ -135,58 +137,39 @@ void
SmokeTestKerberizedCluster(ExternalMiniClusterOptions opts) {
cluster.Shutdown();
}
-TEST_F(ExternalMiniClusterTest, TestKerberosReacquire) {
- SKIP_IF_SLOW_NOT_ALLOWED();
-
- ExternalMiniClusterOptions opts;
- opts.enable_kerberos = true;
- // Set the kerberos ticket lifetime as 15 seconds to force ticket
reacquisition every 15 seconds.
- // Note that we do not renew tickets but always acquire a new one.
- opts.mini_kdc_options.ticket_lifetime = "15s";
- opts.num_tablet_servers = 1;
-
- NO_FATALS(SmokeTestKerberizedCluster(std::move(opts)));
-}
-
-TEST_P(ExternalMiniClusterTest, TestBasicOperation) {
- SKIP_IF_SLOW_NOT_ALLOWED();
-
- ExternalMiniClusterOptions opts;
- const auto& param = GetParam();
- opts.enable_kerberos = std::get<0>(param) == Kerberos::ENABLED;
- if (std::get<1>(param) == HiveMetastore::ENABLED) {
- opts.hms_mode = HmsMode::ENABLE_HIVE_METASTORE;
- }
- opts.num_ntp_servers = std::get<2>(param);
+void SmokeExternalMiniCluster(const ExternalMiniClusterOptions& opts,
+ ExternalMiniCluster* cluster,
+ vector<HostPort>* master_rpc_addresses) {
+ CHECK(cluster);
+ CHECK(master_rpc_addresses);
+ master_rpc_addresses->clear();
- opts.num_masters = 3;
- opts.num_tablet_servers = 3;
-
- ExternalMiniCluster cluster(opts);
- ASSERT_OK(cluster.Start());
+ ASSERT_OK(cluster->Start());
// Verify each of the masters.
for (int i = 0; i < opts.num_masters; i++) {
SCOPED_TRACE(i);
- ExternalMaster* master = CHECK_NOTNULL(cluster.master(i));
- HostPort master_rpc = master->bound_rpc_hostport();
- string expected_prefix = Substitute("$0:", cluster.GetBindIpForMaster(i));
- if (cluster.bind_mode() == BindMode::UNIQUE_LOOPBACK) {
+ ExternalMaster* master = CHECK_NOTNULL(cluster->master(i));
+ HostPort master_endpoint = master->bound_rpc_hostport();
+ string expected_prefix = Substitute("$0:", cluster->GetBindIpForMaster(i));
+ if (cluster->bind_mode() == BindMode::UNIQUE_LOOPBACK) {
EXPECT_NE(expected_prefix, "127.0.0.1:") << "Should bind to unique
per-server hosts";
}
- EXPECT_TRUE(HasPrefixString(master_rpc.ToString(), expected_prefix)) <<
master_rpc.ToString();
+ EXPECT_TRUE(HasPrefixString(master_endpoint.ToString(), expected_prefix))
+ << master_endpoint.ToString();
HostPort master_http = master->bound_http_hostport();
EXPECT_TRUE(HasPrefixString(master_http.ToString(), expected_prefix)) <<
master_http.ToString();
+ master_rpc_addresses->emplace_back(std::move(master_endpoint));
}
// Verify each of the tablet servers.
for (int i = 0; i < opts.num_tablet_servers; i++) {
SCOPED_TRACE(i);
- ExternalTabletServer* ts = CHECK_NOTNULL(cluster.tablet_server(i));
+ ExternalTabletServer* ts = CHECK_NOTNULL(cluster->tablet_server(i));
HostPort ts_rpc = ts->bound_rpc_hostport();
- string expected_prefix = Substitute("$0:",
cluster.GetBindIpForTabletServer(i));
- if (cluster.bind_mode() == BindMode::UNIQUE_LOOPBACK) {
+ string expected_prefix = Substitute("$0:",
cluster->GetBindIpForTabletServer(i));
+ if (cluster->bind_mode() == BindMode::UNIQUE_LOOPBACK) {
EXPECT_NE(expected_prefix, "127.0.0.1:") << "Should bind to unique
per-server hosts";
}
EXPECT_TRUE(HasPrefixString(ts_rpc.ToString(), expected_prefix)) <<
ts_rpc.ToString();
@@ -196,10 +179,10 @@ TEST_P(ExternalMiniClusterTest, TestBasicOperation) {
}
// Ensure that all of the tablet servers can register with the masters.
- ASSERT_OK(cluster.WaitForTabletServerCount(opts.num_tablet_servers,
MonoDelta::FromSeconds(30)));
+ ASSERT_OK(cluster->WaitForTabletServerCount(opts.num_tablet_servers,
MonoDelta::FromSeconds(30)));
// Restart a master and a tablet server. Make sure they come back up with
the same ports.
- ExternalMaster* master = cluster.master(0);
+ ExternalMaster* master = cluster->master(0);
HostPort master_rpc = master->bound_rpc_hostport();
HostPort master_http = master->bound_http_hostport();
@@ -209,7 +192,7 @@ TEST_P(ExternalMiniClusterTest, TestBasicOperation) {
ASSERT_EQ(master_rpc.ToString(), master->bound_rpc_hostport().ToString());
ASSERT_EQ(master_http.ToString(), master->bound_http_hostport().ToString());
- ExternalTabletServer* ts = cluster.tablet_server(0);
+ ExternalTabletServer* ts = cluster->tablet_server(0);
HostPort ts_rpc = ts->bound_rpc_hostport();
HostPort ts_http = ts->bound_http_hostport();
@@ -225,7 +208,7 @@ TEST_P(ExternalMiniClusterTest, TestBasicOperation) {
thrift::ClientOptions hms_client_opts;
hms_client_opts.enable_kerberos = opts.enable_kerberos;
hms_client_opts.service_principal = "hive";
- hms::HmsClient hms_client(cluster.hms()->address(), hms_client_opts);
+ hms::HmsClient hms_client(cluster->hms()->address(), hms_client_opts);
ASSERT_OK(hms_client.Start());
vector<string> tables;
ASSERT_OK(hms_client.GetTableNames("default", &tables));
@@ -235,8 +218,8 @@ TEST_P(ExternalMiniClusterTest, TestBasicOperation) {
// Verify that, in a Kerberized cluster, if we drop our Kerberos environment,
// we can't make RPCs to a server.
if (opts.enable_kerberos) {
- ASSERT_OK(cluster.kdc()->Kdestroy());
- Status s = cluster.SetFlag(ts, "foo", "bar");
+ ASSERT_OK(cluster->kdc()->Kdestroy());
+ Status s = cluster->SetFlag(ts, "foo", "bar");
// The error differs depending on the version of Kerberos, so we match
// either message.
ASSERT_STR_CONTAINS(s.ToString(),
@@ -256,8 +239,60 @@ TEST_P(ExternalMiniClusterTest, TestBasicOperation) {
ts->Shutdown();
ASSERT_OK(ts->Restart());
ASSERT_TRUE(ts->IsProcessAlive());
+}
- cluster.Shutdown();
+TEST_F(ExternalMiniClusterTest, TestKerberosReacquire) {
+ SKIP_IF_SLOW_NOT_ALLOWED();
+
+ ExternalMiniClusterOptions opts;
+ opts.enable_kerberos = true;
+ // Set the kerberos ticket lifetime as 15 seconds to force ticket
reacquisition every 15 seconds.
+ // Note that we do not renew tickets but always acquire a new one.
+ opts.mini_kdc_options.ticket_lifetime = "15s";
+ opts.num_tablet_servers = 1;
+
+ NO_FATALS(SmokeTestKerberizedCluster(std::move(opts)));
+}
+
+TEST_P(ExternalMiniClusterTest, TestBasicOperation) {
+ SKIP_IF_SLOW_NOT_ALLOWED();
+
+ ExternalMiniClusterOptions opts;
+ const auto& param = GetParam();
+ opts.enable_kerberos = std::get<0>(param) == Kerberos::ENABLED;
+ if (std::get<1>(param) == HiveMetastore::ENABLED) {
+ opts.hms_mode = HmsMode::ENABLE_HIVE_METASTORE;
+ }
+ opts.num_ntp_servers = std::get<2>(param);
+
+ opts.num_masters = 3;
+ opts.num_tablet_servers = 3;
+
+ unique_ptr<ExternalMiniCluster> cluster(new ExternalMiniCluster(opts));
+ vector<HostPort> master_rpc_addresses;
+ NO_FATALS(SmokeExternalMiniCluster(opts, cluster.get(),
&master_rpc_addresses));
+
+ // Destroy the cluster object, create a new one with the same options,
+ // and run the same scenario again at already existing data directory
+ // structure.
+ // This is to make sure that:
+ // * the cluster's components are shutdown upon the destruction
+ // of the object
+ // * configuration files and other persistent data for cluster components
+ // are either reused or rewritten/recreated in consistent manner
+ // The only cluster options to preserve is the masters' RPC addresses from
+ // the prior run.
+ opts.master_rpc_addresses = master_rpc_addresses;
+ cluster.reset(new ExternalMiniCluster(opts));
+ NO_FATALS(SmokeExternalMiniCluster(opts, cluster.get(),
&master_rpc_addresses));
+ ASSERT_EQ(opts.master_rpc_addresses, master_rpc_addresses);
+
+ // Shutdown the cluster explicitly. This is not strictly necessary since
+ // the cluster will be shutdown upon the call of ExternalMiniCluster's
+ // destructor, but this is done in the context of testing. This is to verify
+ // that ExternalMiniCluster object destructor works as expected in the case
+ // if the cluster has already been shutdown.
+ cluster->Shutdown();
}
} // namespace cluster