This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit a24fd5bb1a7434b8f8230121ba3e2145c1b8205b
Author: Alexey Serbin <[email protected]>
AuthorDate: Fri Dec 20 21:56:41 2024 -0800

    [common] remove std::unique_lock template parameter
    
    As of C++17, class template argument deduction (CTAD) makes it
    unnecessary to specify the template parameter explicitly when the
    compiler can deduce it from the constructor's arguments.  This patch
    removes the explicit template parameter from std::unique_lock and
    related lock types such as std::lock_guard, and drops the
    now-redundant LockGuard/UniqueLock/LockType type aliases.
    
    There are no functional modifications in this changelist.
    
    Change-Id: If989375d0f7eb14edd149c9cd9ff976c11bea421
    Reviewed-on: http://gerrit.cloudera.org:8080/22257
    Tested-by: Kudu Jenkins
    Reviewed-by: Marton Greber <[email protected]>
    Reviewed-by: Gabriella Lotz <[email protected]>
---
 src/kudu/client/authz_token_cache.cc             |  2 +-
 src/kudu/client/batcher.cc                       |  4 +-
 src/kudu/client/client-internal.cc               |  2 +-
 src/kudu/client/meta_cache.cc                    |  2 +-
 src/kudu/clock/hybrid_clock.cc                   |  2 +-
 src/kudu/consensus/consensus_peers.cc            |  4 +-
 src/kudu/consensus/consensus_queue.cc            |  2 +-
 src/kudu/consensus/log_cache.cc                  |  4 +-
 src/kudu/consensus/raft_consensus.cc             | 62 ++++++++++++------------
 src/kudu/consensus/raft_consensus.h              |  3 --
 src/kudu/consensus/raft_consensus_quorum-test.cc |  6 +--
 src/kudu/fs/block_manager-stress-test.cc         |  2 +-
 src/kudu/fs/fs_manager.h                         |  8 ++-
 src/kudu/fs/log_block_manager.cc                 |  2 +-
 src/kudu/integration-tests/authz_token-itest.cc  |  7 ++-
 src/kudu/master/catalog_manager.h                |  3 +-
 src/kudu/rpc/periodic.cc                         |  2 +-
 src/kudu/rpc/reactor.cc                          |  2 +-
 src/kudu/rpc/reactor.h                           |  3 +-
 src/kudu/rpc/rpcz_store.cc                       |  2 +-
 src/kudu/rpc/service_queue.cc                    |  2 +-
 src/kudu/security/token_signer.cc                |  2 +-
 src/kudu/tablet/mvcc.h                           |  3 +-
 src/kudu/tablet/ops/alter_schema_op.cc           |  6 ++-
 src/kudu/tablet/ops/op_driver.cc                 |  2 +-
 src/kudu/tablet/ops/participant_op.cc            |  4 +-
 src/kudu/tablet/ops/write_op.cc                  |  8 +--
 src/kudu/tablet/rowset.h                         |  2 +-
 src/kudu/tablet/tablet.cc                        | 12 ++---
 src/kudu/tablet/tablet_metadata.cc               |  2 +-
 src/kudu/tablet/tablet_metadata.h                |  3 +-
 src/kudu/tablet/tablet_replica.cc                |  2 +-
 src/kudu/tablet/tablet_replica_mm_ops.cc         |  4 +-
 src/kudu/tablet/txn_participant.cc               |  2 +-
 src/kudu/tools/rebalancer_tool.cc                |  2 +-
 src/kudu/transactions/txn_status_manager.cc      |  6 +--
 src/kudu/tserver/scanners.h                      |  2 +-
 src/kudu/tserver/ts_tablet_manager.cc            |  2 +-
 src/kudu/util/kernel_stack_watchdog.cc           |  2 +-
 src/kudu/util/oid_generator.h                    |  4 +-
 src/kudu/util/rw_mutex-test.cc                   |  2 +-
 41 files changed, 94 insertions(+), 104 deletions(-)

diff --git a/src/kudu/client/authz_token_cache.cc 
b/src/kudu/client/authz_token_cache.cc
index 64d8aa07f..9a1ad49a9 100644
--- a/src/kudu/client/authz_token_cache.cc
+++ b/src/kudu/client/authz_token_cache.cc
@@ -121,7 +121,7 @@ void AuthzTokenCache::RetrieveNewAuthzToken(const 
KuduTable* table,
   DCHECK(table);
   DCHECK(deadline.Initialized());
   const string& table_id = table->id();
-  std::unique_lock<simple_spinlock> l(rpc_lock_);
+  std::unique_lock l(rpc_lock_);
   // If there already exists an RPC for this table; attach the callback.
   auto* rpc_and_cbs = FindOrNull(authz_rpcs_, table_id);
   if (rpc_and_cbs) {
diff --git a/src/kudu/client/batcher.cc b/src/kudu/client/batcher.cc
index 762e9b0af..b4490f4d5 100644
--- a/src/kudu/client/batcher.cc
+++ b/src/kudu/client/batcher.cc
@@ -632,7 +632,7 @@ Batcher::Batcher(KuduClient* client,
 }
 
 void Batcher::Abort() {
-  std::unique_lock<simple_spinlock> l(lock_);
+  std::unique_lock l(lock_);
   state_ = kAborted;
 
   vector<InFlightOp*> to_abort;
@@ -811,7 +811,7 @@ void Batcher::TabletLookupFinished(InFlightOp* op, const 
Status& s) {
   // Acquire the batcher lock early to atomically:
   // 1. Test if the batcher was aborted, and
   // 2. Change the op state.
-  std::unique_lock<simple_spinlock> l(lock_);
+  std::unique_lock l(lock_);
 
   if (IsAbortedUnlocked()) {
     VLOG(1) << "Aborted batch: TabletLookupFinished for " << op->ToString();
diff --git a/src/kudu/client/client-internal.cc 
b/src/kudu/client/client-internal.cc
index 9a2237e53..fbd6048b1 100644
--- a/src/kudu/client/client-internal.cc
+++ b/src/kudu/client/client-internal.cc
@@ -863,7 +863,7 @@ void KuduClient::Data::ConnectToClusterAsync(KuduClient* 
client,
   // this information in parallel, since the requests should end up with the
   // same result. Instead, simply piggy-back onto the existing request by 
adding
   // our the callback to leader_master_callbacks_{any_creds,primary_creds}_.
-  std::unique_lock<simple_spinlock> l(leader_master_lock_);
+  std::unique_lock l(leader_master_lock_);
 
   // Optimize sending out a new request in the presence of already existing
   // requests to the leader master. Depending on the credentials policy for the
diff --git a/src/kudu/client/meta_cache.cc b/src/kudu/client/meta_cache.cc
index 25aca9b6b..a880fdbcd 100644
--- a/src/kudu/client/meta_cache.cc
+++ b/src/kudu/client/meta_cache.cc
@@ -155,7 +155,7 @@ void RemoteTabletServer::DnsResolutionFinished(const 
HostPort& hp,
 void RemoteTabletServer::InitProxy(KuduClient* client, const StatusCallback& 
cb) {
   HostPort hp;
   {
-    std::unique_lock<simple_spinlock> l(lock_);
+    std::unique_lock l(lock_);
 
     if (proxy_) {
       // Already have a proxy created.
diff --git a/src/kudu/clock/hybrid_clock.cc b/src/kudu/clock/hybrid_clock.cc
index da4660fc9..812759182 100644
--- a/src/kudu/clock/hybrid_clock.cc
+++ b/src/kudu/clock/hybrid_clock.cc
@@ -726,7 +726,7 @@ Status HybridClock::WalltimeWithError(uint64_t* now_usec, 
uint64_t* error_usec)
   } else {
     // We failed to read the clock. Extrapolate the new time based on our
     // last successful read.
-    std::unique_lock<decltype(last_clock_read_lock_)> l(last_clock_read_lock_);
+    std::unique_lock l(last_clock_read_lock_);
     if (!is_extrapolating_) {
       is_extrapolating_ = true;
       extrapolating_->set_value(is_extrapolating_);
diff --git a/src/kudu/consensus/consensus_peers.cc 
b/src/kudu/consensus/consensus_peers.cc
index aebb07968..9fa2bb39e 100644
--- a/src/kudu/consensus/consensus_peers.cc
+++ b/src/kudu/consensus/consensus_peers.cc
@@ -190,7 +190,7 @@ Status Peer::SignalRequest(bool even_if_queue_empty) {
 }
 
 void Peer::SendNextRequest(bool even_if_queue_empty) {
-  std::unique_lock<simple_spinlock> l(peer_lock_);
+  std::unique_lock l(peer_lock_);
   if (PREDICT_FALSE(closed_)) {
     return;
   }
@@ -439,7 +439,7 @@ Status Peer::PrepareTabletCopyRequest() {
 
 void Peer::ProcessTabletCopyResponse() {
   // If the peer is already closed return.
-  std::unique_lock<simple_spinlock> lock(peer_lock_);
+  std::unique_lock lock(peer_lock_);
   if (PREDICT_FALSE(closed_)) {
     return;
   }
diff --git a/src/kudu/consensus/consensus_queue.cc 
b/src/kudu/consensus/consensus_queue.cc
index 4f279a8fe..90d1c7930 100644
--- a/src/kudu/consensus/consensus_queue.cc
+++ b/src/kudu/consensus/consensus_queue.cc
@@ -403,7 +403,7 @@ Status 
PeerMessageQueue::AppendOperations(vector<ReplicateRefPtr> msgs,
                                           const StatusCallback& 
log_append_callback) {
 
   DFAKE_SCOPED_LOCK(append_fake_lock_);
-  std::unique_lock<simple_spinlock> lock(queue_lock_);
+  std::unique_lock lock(queue_lock_);
 
   OpId last_id = msgs.back()->get()->id();
 
diff --git a/src/kudu/consensus/log_cache.cc b/src/kudu/consensus/log_cache.cc
index 7702843c1..77e66f8bb 100644
--- a/src/kudu/consensus/log_cache.cc
+++ b/src/kudu/consensus/log_cache.cc
@@ -164,7 +164,7 @@ Status LogCache::AppendOperations(vector<ReplicateRefPtr> 
msgs,
   const int64_t first_idx_in_batch = msgs.front()->get()->id().index();
   const int64_t last_idx_in_batch = msgs.back()->get()->id().index();
 
-  std::unique_lock<simple_spinlock> l(lock_);
+  std::unique_lock l(lock_);
   // If we're not appending a consecutive op we're likely overwriting and
   // need to replace operations in the cache.
   if (first_idx_in_batch != next_sequential_op_index_) {
@@ -299,7 +299,7 @@ Status LogCache::ReadOps(int64_t after_op_index,
   int64_t remaining_space = max_size_bytes;
   int64_t next_index = after_op_index + 1;
 
-  std::unique_lock<simple_spinlock> l(lock_);
+  std::unique_lock l(lock_);
   while (remaining_space > 0 && next_index < next_sequential_op_index_) {
     // If the messages the peer needs haven't been loaded into the queue yet,
     // load them.
diff --git a/src/kudu/consensus/raft_consensus.cc 
b/src/kudu/consensus/raft_consensus.cc
index 6be58bbf8..f496d90ca 100644
--- a/src/kudu/consensus/raft_consensus.cc
+++ b/src/kudu/consensus/raft_consensus.cc
@@ -209,7 +209,7 @@ RaftConsensus::RaftConsensus(
 }
 
 Status RaftConsensus::Init() {
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   DCHECK_EQ(kNew, state_) << State_Name(state_);
   RETURN_NOT_OK(cmeta_manager_->Load(options_.tablet_id, &cmeta_));
   SetStateUnlocked(kInitialized);
@@ -332,7 +332,7 @@ Status RaftConsensus::Start(const ConsensusBootstrapInfo& 
info,
 
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     CHECK_EQ(kInitialized, state_) << LogPrefixUnlocked() << "Illegal state 
for Start(): "
                                    << State_Name(state_);
 
@@ -415,7 +415,7 @@ Status RaftConsensus::EmulateElectionForTests() {
                "tablet", options_.tablet_id);
 
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   RETURN_NOT_OK(CheckRunningUnlocked());
 
   LOG_WITH_PREFIX_UNLOCKED(INFO) << "Emulating election...";
@@ -466,7 +466,7 @@ Status RaftConsensus::StartElection(ElectionMode mode, 
ElectionReason reason) {
   scoped_refptr<LeaderElection> election;
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     RETURN_NOT_OK(CheckRunningUnlocked());
 
     RaftPeerPB::Role active_role = cmeta_->active_role();
@@ -574,7 +574,7 @@ Status RaftConsensus::WaitUntilLeader(const MonoDelta& 
timeout) {
 Status RaftConsensus::StepDown(LeaderStepDownResponsePB* resp) {
   TRACE_EVENT0("consensus", "RaftConsensus::StepDown");
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   DCHECK((queue_->IsInLeaderMode() && cmeta_->active_role() == 
RaftPeerPB::LEADER) ||
          (!queue_->IsInLeaderMode() && cmeta_->active_role() != 
RaftPeerPB::LEADER));
   RETURN_NOT_OK(CheckRunningUnlocked());
@@ -600,7 +600,7 @@ Status RaftConsensus::TransferLeadership(const 
optional<string>& new_leader_uuid
                                          LeaderStepDownResponsePB* resp) {
   TRACE_EVENT0("consensus", "RaftConsensus::TransferLeadership");
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   LOG_WITH_PREFIX_UNLOCKED(INFO) << "Received request to transfer leadership"
                                  << (new_leader_uuid ?
                                     Substitute(" to TS $0", *new_leader_uuid) :
@@ -763,7 +763,7 @@ Status RaftConsensus::Replicate(const 
scoped_refptr<ConsensusRound>& round) {
   std::lock_guard lock(update_lock_);
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     RETURN_NOT_OK(CheckSafeToReplicateUnlocked(*round->replicate_msg()));
     RETURN_NOT_OK(round->CheckBoundTerm(CurrentTermUnlocked()));
     RETURN_NOT_OK(AppendNewRoundToQueueUnlocked(round));
@@ -896,7 +896,7 @@ void RaftConsensus::NotifyCommitIndex(int64_t commit_index) 
{
                "commit_index", commit_index);
 
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   // We will process commit notifications while shutting down because a replica
   // which has initiated a Prepare() / Replicate() may eventually commit even 
if
   // its state has changed after the initial Append() / Update().
@@ -921,7 +921,7 @@ void RaftConsensus::NotifyTermChange(int64_t term) {
                "term", term);
 
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   Status s = CheckRunningUnlocked();
   if (PREDICT_FALSE(!s.ok())) {
     LOG_WITH_PREFIX_UNLOCKED(WARNING) << "Unable to handle notification of new 
term "
@@ -947,7 +947,7 @@ void RaftConsensus::NotifyFailedFollower(const string& uuid,
   RaftConfigPB committed_config;
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     int64_t current_term = CurrentTermUnlocked();
     if (current_term != term) {
       LOG_WITH_PREFIX_UNLOCKED(INFO) << fail_msg << "Notified about a follower 
failure in "
@@ -1016,7 +1016,7 @@ void RaftConsensus::TryPromoteNonVoterTask(const string& 
peer_uuid) {
   int64_t current_committed_config_index;
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
 
     if (cmeta_->has_pending_config()) {
      LOG_WITH_PREFIX_UNLOCKED(INFO) << msg << "there is already a config 
change operation "
@@ -1065,7 +1065,7 @@ void RaftConsensus::TryPromoteNonVoterTask(const string& 
peer_uuid) {
 
 void RaftConsensus::TryStartElectionOnPeerTask(const string& peer_uuid) {
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   // Double-check that the peer is a voter in the active config.
   if (!IsRaftConfigVoter(peer_uuid, cmeta_->ActiveConfig())) {
     LOG_WITH_PREFIX_UNLOCKED(INFO) << "Not signalling peer " << peer_uuid
@@ -1132,7 +1132,7 @@ Status RaftConsensus::StartFollowerOpUnlocked(const 
ReplicateRefPtr& msg) {
 
 bool RaftConsensus::IsSingleVoterConfig() const {
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   return cmeta_->CountVotersInConfig(COMMITTED_CONFIG) == 1 &&
          cmeta_->IsVoterInConfig(peer_uuid(), COMMITTED_CONFIG);
 }
@@ -1470,7 +1470,7 @@ Status RaftConsensus::UpdateReplica(const 
ConsensusRequestPB* request,
   auto& messages = deduped_req.messages;
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     RETURN_NOT_OK(CheckRunningUnlocked());
     if (!cmeta_->IsMemberInConfig(peer_uuid(), ACTIVE_CONFIG)) {
       LOG_WITH_PREFIX_UNLOCKED(INFO) << "Allowing update even though not a 
member of the config";
@@ -1747,7 +1747,7 @@ Status RaftConsensus::RequestVote(const VoteRequestPB* 
request,
   // We must acquire the update lock in order to ensure that this vote action
   // takes place between requests.
   // Lock ordering: update_lock_ must be acquired before lock_.
-  std::unique_lock<simple_spinlock> update_guard(update_lock_, 
std::defer_lock);
+  std::unique_lock update_guard(update_lock_, std::defer_lock);
   if (PREDICT_TRUE(FLAGS_enable_leader_failure_detection)) {
     update_guard.try_lock();
   } else {
@@ -1765,7 +1765,7 @@ Status RaftConsensus::RequestVote(const VoteRequestPB* 
request,
 
   // Acquire the replica state lock so we can read / modify the consensus 
state.
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
 
   // Ensure our lifecycle state is compatible with voting.
   // If RaftConsensus is running, we use the latest OpId from the WAL to vote.
@@ -1900,7 +1900,7 @@ Status RaftConsensus::BulkChangeConfig(const 
BulkChangeConfigRequestPB& req,
                "tablet", options_.tablet_id);
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     RETURN_NOT_OK(CheckRunningUnlocked());
     RETURN_NOT_OK(CheckActiveLeaderUnlocked());
     RETURN_NOT_OK(CheckNoConfigChangePendingUnlocked());
@@ -2107,7 +2107,7 @@ Status RaftConsensus::UnsafeChangeConfig(
     // Take the snapshot of the replica state and queue state so that
     // we can stick them in the consensus update request later.
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     current_term = CurrentTermUnlocked();
     committed_config = cmeta_->CommittedConfig();
     if (cmeta_->has_pending_config()) {
@@ -2228,7 +2228,7 @@ void RaftConsensus::Stop() {
                "tablet", options_.tablet_id);
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     const State state = state_;
     if (state == kStopping || state == kStopped || state == kShutdown) {
       return;
@@ -2252,7 +2252,7 @@ void RaftConsensus::Stop() {
 
   {
     ThreadRestrictions::AssertWaitAllowed();
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     if (pending_) {
       CHECK_OK(pending_->CancelPendingOps());
     }
@@ -2288,7 +2288,7 @@ void RaftConsensus::Shutdown() {
 
   Stop();
   {
-    LockGuard l(lock_);
+    std::lock_guard l(lock_);
     SetStateUnlocked(kShutdown);
   }
   shutdown_ = true;
@@ -2317,7 +2317,7 @@ Status 
RaftConsensus::StartConsensusOnlyRoundUnlocked(const ReplicateRefPtr& msg
 
 Status RaftConsensus::AdvanceTermForTests(int64_t new_term) {
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   CHECK_OK(CheckRunningUnlocked());
   return HandleTermAdvanceUnlocked(new_term);
 }
@@ -2469,7 +2469,7 @@ Status RaftConsensus::RequestVoteRespondVoteGranted(const 
VoteRequestPB* request
 
 RaftPeerPB::Role RaftConsensus::role() const {
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   return cmeta_->active_role();
 }
 
@@ -2479,7 +2479,7 @@ RaftConsensus::RoleAndMemberType 
RaftConsensus::GetRoleAndMemberType() const {
   auto member_type = RaftPeerPB::UNKNOWN_MEMBER_TYPE;
   const auto& local_peer_uuid = peer_uuid();
 
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   for (const auto& peer : cmeta_->ActiveConfig().peers()) {
     if (peer.permanent_uuid() == local_peer_uuid) {
       member_type = peer.member_type();
@@ -2491,7 +2491,7 @@ RaftConsensus::RoleAndMemberType 
RaftConsensus::GetRoleAndMemberType() const {
 }
 
 int64_t RaftConsensus::CurrentTerm() const {
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   return CurrentTermUnlocked();
 }
 
@@ -2606,7 +2606,7 @@ const string& RaftConsensus::tablet_id() const {
 Status RaftConsensus::ConsensusState(ConsensusStatePB* cstate,
                                      IncludeHealthReport report_health) const {
   ThreadRestrictions::AssertWaitAllowed();
-  UniqueLock l(lock_);
+  std::unique_lock l(lock_);
   if (state_ == kShutdown) {
     return Status::IllegalState("Tablet replica is shutdown");
   }
@@ -2637,7 +2637,7 @@ Status RaftConsensus::ConsensusState(ConsensusStatePB* 
cstate,
 
 RaftConfigPB RaftConsensus::CommittedConfig() const {
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   return cmeta_->CommittedConfig();
 }
 
@@ -2688,7 +2688,7 @@ void RaftConsensus::DoElectionCallback(ElectionReason 
reason, const ElectionResu
   const char* election_type = was_pre_election ? "pre-election" : "election";
 
   ThreadRestrictions::AssertWaitAllowed();
-  UniqueLock l(lock_);
+  std::unique_lock l(lock_);
 
   // Record the duration of the election regardless of the outcome.
   auto update_metrics = MakeScopedCleanup([&]() {
@@ -2815,7 +2815,7 @@ void RaftConsensus::DoElectionCallback(ElectionReason 
reason, const ElectionResu
 
 optional<OpId> RaftConsensus::GetLastOpId(OpIdType type) {
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   return GetLastOpIdUnlocked(type);
 }
 
@@ -3232,7 +3232,7 @@ const ConsensusOptions& RaftConsensus::GetOptions() const 
{
 
 string RaftConsensus::LogPrefix() const {
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   return LogPrefixUnlocked();
 }
 
@@ -3250,7 +3250,7 @@ string RaftConsensus::LogPrefixUnlocked() const {
 
 string RaftConsensus::ToString() const {
   ThreadRestrictions::AssertWaitAllowed();
-  LockGuard l(lock_);
+  std::lock_guard l(lock_);
   return ToStringUnlocked();
 }
 
diff --git a/src/kudu/consensus/raft_consensus.h 
b/src/kudu/consensus/raft_consensus.h
index 6f26faf50..921d9210d 100644
--- a/src/kudu/consensus/raft_consensus.h
+++ b/src/kudu/consensus/raft_consensus.h
@@ -477,9 +477,6 @@ class RaftConsensus : public 
std::enable_shared_from_this<RaftConsensus>,
     std::string OpsRangeString() const;
   };
 
-  using LockGuard = std::lock_guard<simple_spinlock>;
-  using UniqueLock = std::unique_lock<simple_spinlock>;
-
   // Returns string description for State enum value.
   static const char* State_Name(State state);
 
diff --git a/src/kudu/consensus/raft_consensus_quorum-test.cc 
b/src/kudu/consensus/raft_consensus_quorum-test.cc
index ee613e1fd..67ebe7458 100644
--- a/src/kudu/consensus/raft_consensus_quorum-test.cc
+++ b/src/kudu/consensus/raft_consensus_quorum-test.cc
@@ -702,7 +702,7 @@ TEST_F(RaftConsensusQuorumTest, 
TestConsensusContinuesIfAMinorityFallsBehind) {
     shared_ptr<RaftConsensus> follower0;
     CHECK_OK(peers_->GetPeerByIdx(kFollower0Idx, &follower0));
 
-    RaftConsensus::LockGuard l(follower0->lock_);
+    std::lock_guard l(follower0->lock_);
 
     // If the locked replica would stop consensus we would hang here
     // as we wait for operations to be replicated to a majority.
@@ -740,11 +740,11 @@ TEST_F(RaftConsensusQuorumTest, 
TestConsensusStopsIfAMajorityFallsBehind) {
     // and never letting them go.
     shared_ptr<RaftConsensus> follower0;
     CHECK_OK(peers_->GetPeerByIdx(kFollower0Idx, &follower0));
-    RaftConsensus::LockGuard l_0(follower0->lock_);
+    std::lock_guard l_0(follower0->lock_);
 
     shared_ptr<RaftConsensus> follower1;
     CHECK_OK(peers_->GetPeerByIdx(kFollower1Idx, &follower1));
-    RaftConsensus::LockGuard l_1(follower1->lock_);
+    std::lock_guard l_1(follower1->lock_);
 
     // Append a single message to the queue
     ASSERT_OK(AppendDummyMessage(kLeaderIdx, &round));
diff --git a/src/kudu/fs/block_manager-stress-test.cc 
b/src/kudu/fs/block_manager-stress-test.cc
index fecb97cf6..031006c3b 100644
--- a/src/kudu/fs/block_manager-stress-test.cc
+++ b/src/kudu/fs/block_manager-stress-test.cc
@@ -441,7 +441,7 @@ void BlockManagerStressTest<T>::DeleterThread() {
     shared_ptr<BlockDeletionTransaction> deletion_transaction =
         this->bm_->NewDeletionTransaction();
     {
-      std::unique_lock<simple_spinlock> l(lock_);
+      std::unique_lock l(lock_);
       // If we only have a small number of live blocks, don't delete any.
       // This ensures that, when we restart, we always have a reasonable
       // amount of data -- otherwise the deletion threads are likely to
diff --git a/src/kudu/fs/fs_manager.h b/src/kudu/fs/fs_manager.h
index d0f94383b..7c288f05f 100644
--- a/src/kudu/fs/fs_manager.h
+++ b/src/kudu/fs/fs_manager.h
@@ -628,10 +628,8 @@ class FsManager {
   static const char *kInstanceMetadataFileName;
   static const char *kConsensusMetadataDirName;
 
-  typedef rw_spinlock LockType;
-
   // Lock protecting the env_map_ below.
-  mutable LockType env_lock_;
+  mutable rw_spinlock env_lock_;
   // The environment to be used for all filesystem operations.
   // Different tenant use different env.
   typedef std::map<std::string, std::shared_ptr<Env>> EnvMap;
@@ -662,13 +660,13 @@ class FsManager {
   // Shared by all the block managers.
   scoped_refptr<fs::FsErrorManager> error_manager_;
   // Lock protecting 'dd_manager_map_' below.
-  mutable LockType ddm_lock_;
+  mutable rw_spinlock ddm_lock_;
   typedef scoped_refptr<fs::DataDirManager> ScopedDDManagerPtr;
   typedef std::map<std::string, ScopedDDManagerPtr> DataDirManagerMap;
   DataDirManagerMap dd_manager_map_;
 
   // Lock protecting 'block_manager_map_'.
-  mutable LockType bm_lock_;
+  mutable rw_spinlock bm_lock_;
   typedef scoped_refptr<fs::BlockManager> ScopedBlockManagerPtr;
   typedef std::map<std::string, ScopedBlockManagerPtr> BlockManagerMap;
   BlockManagerMap block_manager_map_;
diff --git a/src/kudu/fs/log_block_manager.cc b/src/kudu/fs/log_block_manager.cc
index 2c7bc12c6..e3e05462b 100644
--- a/src/kudu/fs/log_block_manager.cc
+++ b/src/kudu/fs/log_block_manager.cc
@@ -1091,7 +1091,7 @@ void LogBlockContainerNativeMeta::CompactMetadata() {
   SCOPED_LOG_SLOW_EXECUTION(WARNING, 5, Substitute("CompactMetadata $0", 
ToString()));
   // Skip compacting if lock failed to reduce overhead, metadata is on 
compacting or will be
   // compacted next time.
-  std::unique_lock<RWMutex> l(metadata_compact_lock_, std::try_to_lock);
+  std::unique_lock l(metadata_compact_lock_, std::try_to_lock);
   if (!l.owns_lock()) {
     return;
   }
diff --git a/src/kudu/integration-tests/authz_token-itest.cc 
b/src/kudu/integration-tests/authz_token-itest.cc
index ddc9dccaa..7898390ea 100644
--- a/src/kudu/integration-tests/authz_token-itest.cc
+++ b/src/kudu/integration-tests/authz_token-itest.cc
@@ -22,6 +22,7 @@
 #include <ostream>
 #include <string>
 #include <thread>
+#include <type_traits>
 #include <utility>
 #include <vector>
 
@@ -62,6 +63,7 @@ using std::string;
 using std::thread;
 using std::unique_ptr;
 using std::vector;
+using strings::Substitute;
 
 DECLARE_bool(master_support_authz_tokens);
 DECLARE_bool(tserver_enforce_access_control);
@@ -76,8 +78,6 @@ 
METRIC_DECLARE_histogram(handler_latency_kudu_master_MasterService_GetTableSchem
 
 namespace kudu {
 
-class RWMutex;
-
 using cluster::InternalMiniCluster;
 using cluster::InternalMiniClusterOptions;
 using client::AuthenticationCredentialsPB;
@@ -97,7 +97,6 @@ using security::SignedTokenPB;
 using security::TablePrivilegePB;
 using security::TokenSigner;
 using security::TokenSigningPrivateKeyPB;
-using strings::Substitute;
 
 namespace {
 
@@ -426,7 +425,7 @@ TEST_F(AuthzTokenTest, TestSingleMasterUnavailable) {
 
   // Take the leader lock on the master, which will prevent successful attempts
   // to get a new token, but will allow retries.
-  std::unique_lock<RWMutex> leader_lock(
+  std::unique_lock leader_lock(
       cluster_->mini_master()->master()->catalog_manager()->leader_lock_);
 
   // After a while, the client operation will time out.
diff --git a/src/kudu/master/catalog_manager.h 
b/src/kudu/master/catalog_manager.h
index 5acfd526c..0d6848749 100644
--- a/src/kudu/master/catalog_manager.h
+++ b/src/kudu/master/catalog_manager.h
@@ -1358,8 +1358,7 @@ class CatalogManager : public 
tserver::TabletReplicaLookupIf {
   // easy to make a "gettable set".
 
   // Lock protecting the various maps and sets below.
-  typedef rw_spinlock LockType;
-  mutable LockType lock_;
+  mutable rw_spinlock lock_;
 
   // Table maps: table-id -> TableInfo and normalized-table-name -> TableInfo
   TableInfoMap table_ids_map_;
diff --git a/src/kudu/rpc/periodic.cc b/src/kudu/rpc/periodic.cc
index 21631f1f0..c4d02a9a8 100644
--- a/src/kudu/rpc/periodic.cc
+++ b/src/kudu/rpc/periodic.cc
@@ -74,7 +74,7 @@ PeriodicTimer::~PeriodicTimer() {
 }
 
 void PeriodicTimer::Start(optional<MonoDelta> next_task_delta) {
-  std::unique_lock<simple_spinlock> l(lock_);
+  std::unique_lock l(lock_);
   if (!started_) {
     started_ = true;
     SnoozeUnlocked(std::move(next_task_delta));
diff --git a/src/kudu/rpc/reactor.cc b/src/kudu/rpc/reactor.cc
index 5a0378680..114a8fba0 100644
--- a/src/kudu/rpc/reactor.cc
+++ b/src/kudu/rpc/reactor.cc
@@ -954,7 +954,7 @@ void Reactor::QueueCancellation(shared_ptr<OutboundCall> 
call) {
 void Reactor::ScheduleReactorTask(ReactorTask* task) {
   bool was_empty;
   {
-    std::unique_lock<LockType> l(lock_);
+    std::unique_lock l(lock_);
     if (PREDICT_FALSE(closing_)) {
       // We guarantee the reactor lock is not taken when calling Abort().
       l.unlock();
diff --git a/src/kudu/rpc/reactor.h b/src/kudu/rpc/reactor.h
index 8944a2b2d..a74b37846 100644
--- a/src/kudu/rpc/reactor.h
+++ b/src/kudu/rpc/reactor.h
@@ -405,8 +405,7 @@ class Reactor {
 
  private:
   friend class ReactorThread;
-  typedef simple_spinlock LockType;
-  mutable LockType lock_;
+  mutable simple_spinlock lock_;
 
   // parent messenger
   std::shared_ptr<Messenger> messenger_;
diff --git a/src/kudu/rpc/rpcz_store.cc b/src/kudu/rpc/rpcz_store.cc
index 97e28b2f6..8bbbfd17f 100644
--- a/src/kudu/rpc/rpcz_store.cc
+++ b/src/kudu/rpc/rpcz_store.cc
@@ -165,7 +165,7 @@ void MethodSampler::SampleCall(InboundCall* call) {
   int64_t us_since_trace = now - bucket->last_sample_time;
   if (us_since_trace > kSampleIntervalUs) {
     {
-      std::unique_lock<simple_spinlock> lock(bucket->sample_lock, 
std::try_to_lock);
+      std::unique_lock lock(bucket->sample_lock, std::try_to_lock);
       // If another thread is already taking a sample, it's not worth waiting.
       if (!lock.owns_lock()) {
         return;
diff --git a/src/kudu/rpc/service_queue.cc b/src/kudu/rpc/service_queue.cc
index fe7356775..a3259a621 100644
--- a/src/kudu/rpc/service_queue.cc
+++ b/src/kudu/rpc/service_queue.cc
@@ -76,7 +76,7 @@ bool 
LifoServiceQueue::BlockingGet(std::unique_ptr<InboundCall>* out) {
 
 QueueStatus LifoServiceQueue::Put(InboundCall* call,
                                   std::optional<InboundCall*>* evicted) {
-  std::unique_lock<simple_spinlock> l(lock_);
+  std::unique_lock l(lock_);
   if (PREDICT_FALSE(shutdown_)) {
     return QUEUE_SHUTDOWN;
   }
diff --git a/src/kudu/security/token_signer.cc 
b/src/kudu/security/token_signer.cc
index 319e76e75..8306f13a7 100644
--- a/src/kudu/security/token_signer.cc
+++ b/src/kudu/security/token_signer.cc
@@ -207,7 +207,7 @@ Status 
TokenSigner::CheckNeedKey(unique_ptr<TokenSigningPrivateKey>* tsk) const
   CHECK(tsk);
   const int64_t now = WallTime_Now();
 
-  unique_lock<RWMutex> l(lock_);
+  unique_lock l(lock_);
   if (tsk_deque_.empty()) {
     // No active key: need a new one.
     const int64_t key_seq_num = last_key_seq_num_ + 1;
diff --git a/src/kudu/tablet/mvcc.h b/src/kudu/tablet/mvcc.h
index 88352dc58..045581c5c 100644
--- a/src/kudu/tablet/mvcc.h
+++ b/src/kudu/tablet/mvcc.h
@@ -426,8 +426,7 @@ class MvccManager {
   // finishes applying or aborts.
   void AdvanceEarliestInFlightTimestamp();
 
-  typedef simple_spinlock LockType;
-  mutable LockType lock_;
+  mutable simple_spinlock lock_;
 
   // The kLatest snapshot that gets updated with op timestamps as MVCC ops
   // start and complete through the lifespan of this MvccManager.
diff --git a/src/kudu/tablet/ops/alter_schema_op.cc 
b/src/kudu/tablet/ops/alter_schema_op.cc
index cec6585a9..ba32b18ec 100644
--- a/src/kudu/tablet/ops/alter_schema_op.cc
+++ b/src/kudu/tablet/ops/alter_schema_op.cc
@@ -61,13 +61,15 @@ using tserver::TabletServerErrorPB;
 
 void AlterSchemaOpState::AcquireSchemaLock(rw_semaphore* l) {
   TRACE("Acquiring schema lock in exclusive mode");
-  schema_lock_ = std::unique_lock<rw_semaphore>(*l);
+  decltype(schema_lock_) tmp(*l);
+  schema_lock_.swap(tmp);
   TRACE("Acquired schema lock");
 }
 
 void AlterSchemaOpState::ReleaseSchemaLock() {
   CHECK(schema_lock_.owns_lock());
-  schema_lock_ = std::unique_lock<rw_semaphore>();
+  decltype(schema_lock_) tmp;
+  schema_lock_.swap(tmp);
   TRACE("Released schema lock");
 }
 
diff --git a/src/kudu/tablet/ops/op_driver.cc b/src/kudu/tablet/ops/op_driver.cc
index 41cae488a..b081f624f 100644
--- a/src/kudu/tablet/ops/op_driver.cc
+++ b/src/kudu/tablet/ops/op_driver.cc
@@ -497,7 +497,7 @@ void OpDriver::Abort(const Status& status) {
 
 Status OpDriver::ApplyAsync() {
   {
-    std::unique_lock<simple_spinlock> lock(lock_);
+    std::unique_lock lock(lock_);
     DCHECK_EQ(prepare_state_, PREPARED);
     if (op_status_.ok()) {
       DCHECK_EQ(replication_state_, REPLICATED);
diff --git a/src/kudu/tablet/ops/participant_op.cc b/src/kudu/tablet/ops/participant_op.cc
index 598b88043..8b9f0a45b 100644
--- a/src/kudu/tablet/ops/participant_op.cc
+++ b/src/kudu/tablet/ops/participant_op.cc
@@ -64,7 +64,6 @@ using std::unique_ptr;
 using strings::Substitute;
 
 namespace kudu {
-class rw_semaphore;
 
 namespace tablet {
 
@@ -88,7 +87,8 @@ void ParticipantOpState::AcquireTxnAndLock() {
 
 void ParticipantOpState::ReleaseTxn() {
   if (txn_lock_.owns_lock()) {
-    txn_lock_ = std::unique_lock<rw_semaphore>();
+    decltype(txn_lock_) tmp;
+    txn_lock_.swap(tmp);
   }
   txn_.reset();
   TRACE("Released txn lock");
diff --git a/src/kudu/tablet/ops/write_op.cc b/src/kudu/tablet/ops/write_op.cc
index dba3108c1..2b99abe69 100644
--- a/src/kudu/tablet/ops/write_op.cc
+++ b/src/kudu/tablet/ops/write_op.cc
@@ -430,8 +430,8 @@ void WriteOpState::set_txn_rowsets(const scoped_refptr<TxnRowSets>& rowsets) {
 
 void WriteOpState::AcquireSchemaLock(rw_semaphore* schema_lock) {
   TRACE("Acquiring schema lock in shared mode");
-  shared_lock temp(*schema_lock);
-  schema_lock_.swap(temp);
+  decltype(schema_lock_) tmp(*schema_lock);
+  schema_lock_.swap(tmp);
   TRACE("Acquired schema lock");
 }
 
@@ -449,8 +449,8 @@ Status WriteOpState::AcquireTxnLockCheckOpen(scoped_refptr<Txn> txn) {
 }
 
 void WriteOpState::ReleaseSchemaLock() {
-  shared_lock<rw_semaphore> temp;
-  schema_lock_.swap(temp);
+  decltype(schema_lock_) tmp;
+  schema_lock_.swap(tmp);
   TRACE("Released schema lock");
 }
 
diff --git a/src/kudu/tablet/rowset.h b/src/kudu/tablet/rowset.h
index 229af3928..c483e127d 100644
--- a/src/kudu/tablet/rowset.h
+++ b/src/kudu/tablet/rowset.h
@@ -310,7 +310,7 @@ class RowSet {
     // the compaction selection has finished because only one thread
     // makes compaction selection at a time on a given Tablet due to
     // Tablet::compact_select_lock_.
-    std::unique_lock<std::mutex> try_lock(*compact_flush_lock(), std::try_to_lock);
+    std::unique_lock try_lock(*compact_flush_lock(), std::try_to_lock);
     return try_lock.owns_lock() && !has_been_compacted();
   }
 
diff --git a/src/kudu/tablet/tablet.cc b/src/kudu/tablet/tablet.cc
index bb96b1af7..81a2ba223 100644
--- a/src/kudu/tablet/tablet.cc
+++ b/src/kudu/tablet/tablet.cc
@@ -1660,7 +1660,7 @@ Status Tablet::ReplaceMemRowSetsUnlocked(RowSetsInCompactionOrFlush* new_mrss,
   // Mark the memrowsets as locked, so compactions won't consider it
   // for inclusion in any concurrent compactions.
   for (auto& mrs : *old_mrss) {
-    std::unique_lock<std::mutex> ms_lock(*mrs->compact_flush_lock(), std::try_to_lock);
+    std::unique_lock ms_lock(*mrs->compact_flush_lock(), std::try_to_lock);
     CHECK(ms_lock.owns_lock());
     new_mrss->AddRowSet(mrs, std::move(ms_lock));
   }
@@ -1861,7 +1861,7 @@ Status Tablet::PickRowSetsToCompact(RowSetsInCompactionOrFlush *picked,
     // compaction from selecting this same rowset, and also ensures that
     // we don't select a rowset which is currently in the middle of being
     // flushed.
-    std::unique_lock<std::mutex> lock(*rs->compact_flush_lock(), std::try_to_lock);
+    std::unique_lock lock(*rs->compact_flush_lock(), std::try_to_lock);
     CHECK(lock.owns_lock()) << rs->ToString() << " appeared available for "
       "compaction when inputs were selected, but was unable to lock its "
       "compact_flush_lock to prepare for compaction.";
@@ -2864,7 +2864,7 @@ Status Tablet::CompactWorstDeltas(RowSet::DeltaCompactionType type) {
     if (!rs) {
       return Status::OK();
     }
-    lock = std::unique_lock<std::mutex>(*rs->compact_flush_lock(), std::try_to_lock);
+    lock = std::unique_lock(*rs->compact_flush_lock(), std::try_to_lock);
     CHECK(lock.owns_lock());
   }
 
@@ -2892,7 +2892,7 @@ double Tablet::GetPerfImprovementForBestDeltaCompact(RowSet::DeltaCompactionType
 double Tablet::GetPerfImprovementForBestDeltaCompactUnlocked(RowSet::DeltaCompactionType type,
                                                              shared_ptr<RowSet>* rs) const {
 #ifndef NDEBUG
-  std::unique_lock<std::mutex> cs_lock(compact_select_lock_, std::try_to_lock);
+  std::unique_lock cs_lock(compact_select_lock_, std::try_to_lock);
   CHECK(!cs_lock.owns_lock());
 #endif
   scoped_refptr<TabletComponents> comps;
@@ -3069,7 +3069,7 @@ Status Tablet::DeleteAncientDeletedRowsets() {
       if (deleted_and_empty) {
         // If we intend on deleting the rowset, take its lock so concurrent
         // compactions don't try to select it for compactions.
-        std::unique_lock<std::mutex> l(*rowset->compact_flush_lock(), std::try_to_lock);
+        std::unique_lock l(*rowset->compact_flush_lock(), std::try_to_lock);
         CHECK(l.owns_lock());
         to_delete.emplace_back(rowset);
         rowset_locks.emplace_back(std::move(l));
@@ -3108,7 +3108,7 @@ Status Tablet::DeleteAncientUndoDeltas(int64_t* blocks_deleted, int64_t* bytes_d
       if (!rowset->IsAvailableForCompaction()) {
         continue;
       }
-      std::unique_lock<std::mutex> lock(*rowset->compact_flush_lock(), std::try_to_lock);
+      std::unique_lock lock(*rowset->compact_flush_lock(), std::try_to_lock);
       CHECK(lock.owns_lock()) << rowset->ToString() << " unable to lock compact_flush_lock";
       rowsets_to_gc_undos.push_back(rowset);
       rowset_locks.push_back(std::move(lock));
diff --git a/src/kudu/tablet/tablet_metadata.cc b/src/kudu/tablet/tablet_metadata.cc
index 2dfea8d6e..027c2af8c 100644
--- a/src/kudu/tablet/tablet_metadata.cc
+++ b/src/kudu/tablet/tablet_metadata.cc
@@ -597,7 +597,7 @@ void TabletMetadata::PinFlush() {
 }
 
 Status TabletMetadata::UnPinFlush() {
-  std::unique_lock<LockType> l(data_lock_);
+  std::unique_lock l(data_lock_);
   CHECK_GT(num_flush_pins_, 0);
   num_flush_pins_--;
   if (needs_flush_) {
diff --git a/src/kudu/tablet/tablet_metadata.h b/src/kudu/tablet/tablet_metadata.h
index 5eeb362c4..d87991a11 100644
--- a/src/kudu/tablet/tablet_metadata.h
+++ b/src/kudu/tablet/tablet_metadata.h
@@ -414,8 +414,7 @@ class TabletMetadata : public RefCountedThreadSafe<TabletMetadata> {
   State state_;
 
   // Lock protecting the underlying data.
-  typedef simple_spinlock LockType;
-  mutable LockType data_lock_;
+  mutable simple_spinlock data_lock_;
 
   // Lock protecting flushing the data to disk.
   // If taken together with 'data_lock_', must be acquired first.
diff --git a/src/kudu/tablet/tablet_replica.cc b/src/kudu/tablet/tablet_replica.cc
index bc90e1169..e89d3c65e 100644
--- a/src/kudu/tablet/tablet_replica.cc
+++ b/src/kudu/tablet/tablet_replica.cc
@@ -321,7 +321,7 @@ const consensus::RaftConfigPB TabletReplica::RaftConfig() const {
 
 void TabletReplica::Stop() {
   {
-    std::unique_lock<simple_spinlock> lock(lock_);
+    std::unique_lock lock(lock_);
     if (state_ == STOPPING || state_ == STOPPED ||
         state_ == SHUTDOWN || state_ == FAILED) {
       lock.unlock();
diff --git a/src/kudu/tablet/tablet_replica_mm_ops.cc b/src/kudu/tablet/tablet_replica_mm_ops.cc
index 2998664cf..48bf6cf49 100644
--- a/src/kudu/tablet/tablet_replica_mm_ops.cc
+++ b/src/kudu/tablet/tablet_replica_mm_ops.cc
@@ -175,8 +175,8 @@ void FlushMRSOp::UpdateStats(MaintenanceOpStats* stats) {
   }
 
   {
-    std::unique_lock<Semaphore> lock(tablet_replica_->tablet()->rowsets_flush_sem_,
-                                     std::defer_lock);
+    std::unique_lock lock(tablet_replica_->tablet()->rowsets_flush_sem_,
+                          std::defer_lock);
     stats->set_runnable(lock.try_lock());
   }
 
diff --git a/src/kudu/tablet/txn_participant.cc b/src/kudu/tablet/txn_participant.cc
index cc89ff577..c6b687b1e 100644
--- a/src/kudu/tablet/txn_participant.cc
+++ b/src/kudu/tablet/txn_participant.cc
@@ -66,7 +66,7 @@ Txn::~Txn() {
 }
 
 void Txn::AcquireWriteLock(std::unique_lock<rw_semaphore>* txn_lock) {
-  std::unique_lock<rw_semaphore> l(state_lock_);
+  std::unique_lock l(state_lock_);
   *txn_lock = std::move(l);
 }
 
diff --git a/src/kudu/tools/rebalancer_tool.cc b/src/kudu/tools/rebalancer_tool.cc
index 0af0324da..30f4ca543 100644
--- a/src/kudu/tools/rebalancer_tool.cc
+++ b/src/kudu/tools/rebalancer_tool.cc
@@ -838,7 +838,7 @@ Status RebalancerTool::GetClusterRawInfo(const optional<string>& location,
 }
 
 Status RebalancerTool::RefreshKsckResults() {
-  std::unique_lock<std::mutex> refresh_guard(ksck_refresh_lock_);
+  std::unique_lock refresh_guard(ksck_refresh_lock_);
   if (ksck_refreshing_) {
     // Other thread is already refreshing the ksck info.
     ksck_refresh_cv_.wait(refresh_guard, [this]{ return !ksck_refreshing_; });
diff --git a/src/kudu/transactions/txn_status_manager.cc b/src/kudu/transactions/txn_status_manager.cc
index 3942f9f3e..ddbd68e37 100644
--- a/src/kudu/transactions/txn_status_manager.cc
+++ b/src/kudu/transactions/txn_status_manager.cc
@@ -1069,7 +1069,7 @@ Status TxnStatusManager::BeginCommitTransaction(int64_t txn_id, const string& us
 
   if (PREDICT_TRUE(FLAGS_txn_schedule_background_tasks)) {
     auto participant_ids = txn->GetParticipantIds();
-    std::unique_lock<simple_spinlock> l(lock_);
+    std::unique_lock l(lock_);
     auto [map_iter, emplaced] = commits_in_flight_.emplace(txn_id,
         new CommitTasks(txn_id, std::move(participant_ids),
                         txn_client, commit_pool_, this));
@@ -1204,7 +1204,7 @@ Status TxnStatusManager::BeginAbortTransaction(int64_t txn_id,
       // (and have removed the commit tasks), while at the same time, we've
       // just served a client-initiated abort and so the state is already
       // ABORT_IN_PROGRESS. If so, we should start abort tasks.
-      std::unique_lock<simple_spinlock> l(lock_);
+      std::unique_lock l(lock_);
       if (PREDICT_FALSE(!ContainsKey(commits_in_flight_, txn_id))) {
         auto participant_ids = txn->GetParticipantIds();
         auto tasks = EmplaceOrDie(&commits_in_flight_, txn_id,
@@ -1233,7 +1233,7 @@ Status TxnStatusManager::BeginAbortTransaction(int64_t txn_id,
 
   if (PREDICT_TRUE(FLAGS_txn_schedule_background_tasks)) {
     auto participant_ids = txn->GetParticipantIds();
-    std::unique_lock<simple_spinlock> l(lock_);
+    std::unique_lock l(lock_);
     auto [map_iter, emplaced] = commits_in_flight_.emplace(txn_id,
         new CommitTasks(txn_id, std::move(participant_ids),
                         txn_client, commit_pool_, this));
diff --git a/src/kudu/tserver/scanners.h b/src/kudu/tserver/scanners.h
index 7dbebe950..7d82edb6d 100644
--- a/src/kudu/tserver/scanners.h
+++ b/src/kudu/tserver/scanners.h
@@ -340,7 +340,7 @@ class Scanner {
 
   // Return the delta from the last time this scan was updated to 'now'.
   MonoDelta TimeSinceLastAccess(const MonoTime& now) const {
-    std::unique_lock<Mutex> l(lock_, std::try_to_lock);
+    std::unique_lock l(lock_, std::try_to_lock);
     if (l.owns_lock()) {
       return now - last_access_time_;
     }
diff --git a/src/kudu/tserver/ts_tablet_manager.cc b/src/kudu/tserver/ts_tablet_manager.cc
index 30715d70e..b56524dde 100644
--- a/src/kudu/tserver/ts_tablet_manager.cc
+++ b/src/kudu/tserver/ts_tablet_manager.cc
@@ -2048,7 +2048,7 @@ Status TSTabletManager::WaitForNoTransitionsForTests(const MonoDelta& timeout) c
 
 void TSTabletManager::UpdateTabletStatsIfNecessary() {
   // Only one thread is allowed to update at the same time.
-  std::unique_lock<rw_spinlock> try_lock(lock_update_, std::try_to_lock);
+  std::unique_lock try_lock(lock_update_, std::try_to_lock);
   if (!try_lock.owns_lock()) {
     return;
   }
diff --git a/src/kudu/util/kernel_stack_watchdog.cc b/src/kudu/util/kernel_stack_watchdog.cc
index 3946757c2..93dd6e382 100644
--- a/src/kudu/util/kernel_stack_watchdog.cc
+++ b/src/kudu/util/kernel_stack_watchdog.cc
@@ -106,7 +106,7 @@ void KernelStackWatchdog::Unregister() {
 
   std::unique_ptr<TLS> tls(tls_);
   {
-    std::unique_lock<Mutex> l(unregister_lock_, std::try_to_lock);
+    std::unique_lock l(unregister_lock_, std::try_to_lock);
     lock_guard l2(tls_lock_);
     CHECK(tls_by_tid_.erase(tid));
     if (!l.owns_lock()) {
diff --git a/src/kudu/util/oid_generator.h b/src/kudu/util/oid_generator.h
index c9015cbf5..24eb9135a 100644
--- a/src/kudu/util/oid_generator.h
+++ b/src/kudu/util/oid_generator.h
@@ -53,10 +53,8 @@ class ObjectIdGenerator {
  private:
   DISALLOW_COPY_AND_ASSIGN(ObjectIdGenerator);
 
-  typedef simple_spinlock LockType;
-
   // Protects 'oid_generator_'.
-  LockType oid_lock_;
+  simple_spinlock oid_lock_;
 
   // Generates new UUIDs.
   boost::uuids::random_generator oid_generator_;
diff --git a/src/kudu/util/rw_mutex-test.cc b/src/kudu/util/rw_mutex-test.cc
index 24b2592be..6d1e3b13a 100644
--- a/src/kudu/util/rw_mutex-test.cc
+++ b/src/kudu/util/rw_mutex-test.cc
@@ -73,7 +73,7 @@ TEST_P(RWMutexTest, TestDeadlocks) {
     });
     threads.emplace_back([&](){
       while (!done.Load()) {
-        unique_lock<RWMutex> l(lock_, try_to_lock);
+        unique_lock l(lock_, try_to_lock);
         if (l.owns_lock()) {
           number_of_writes++;
         }


Reply via email to