This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git


The following commit(s) were added to refs/heads/master by this push:
     new 95894ffa0 [common] remove std::shared_lock template parameter
95894ffa0 is described below

commit 95894ffa0e2a0390c209f415b25f7caee8a51015
Author: Alexey Serbin <[email protected]>
AuthorDate: Tue Oct 1 13:56:21 2024 -0700

    [common] remove std::shared_lock template parameter
    
    As of C++17, it's no longer necessary to specify the template
    parameter explicitly when the compiler can deduce it from the
    constructor's arguments.  This patch removes all instances of this
    for std::shared_lock.
    
    There are no functional modifications in this changelist.
    
    Change-Id: Iae02322df7149597757ffd10c3553d6745c62b78
    Reviewed-on: http://gerrit.cloudera.org:8080/22222
    Tested-by: Kudu Jenkins
    Reviewed-by: Mahesh Reddy <[email protected]>
    Reviewed-by: Abhishek Chennaka <[email protected]>
---
 src/kudu/client/meta_cache.cc         |  6 ++--
 src/kudu/clock/builtin_ntp.cc         | 17 +++++-----
 src/kudu/common/generic_iterators.cc  |  4 +--
 src/kudu/consensus/log.cc             |  8 ++---
 src/kudu/consensus/log.h              |  6 ++--
 src/kudu/fs/data_dirs.cc              |  5 ++-
 src/kudu/fs/dir_manager.cc            |  4 +--
 src/kudu/fs/dir_manager.h             |  7 ++---
 src/kudu/fs/fs_manager.cc             | 17 +++++-----
 src/kudu/fs/log_block_manager.cc      | 14 +++++----
 src/kudu/master/catalog_manager.cc    | 58 +++++++++++++++++------------------
 src/kudu/master/catalog_manager.h     |  5 ++-
 src/kudu/master/location_cache.cc     |  3 +-
 src/kudu/master/ts_descriptor.cc      | 20 ++++++------
 src/kudu/master/ts_descriptor.h       | 10 +++---
 src/kudu/master/ts_manager.cc         | 22 ++++++-------
 src/kudu/rpc/messenger.cc             |  4 +--
 src/kudu/rpc/messenger.h              |  5 ++-
 src/kudu/rpc/result_tracker.h         |  2 +-
 src/kudu/rpc/rpcz_store.cc            |  4 +--
 src/kudu/security/tls_context.cc      |  6 ++--
 src/kudu/security/tls_context.h       | 11 +++----
 src/kudu/security/token_signer.cc     |  4 +--
 src/kudu/security/token_verifier.cc   |  7 ++---
 src/kudu/server/webserver.cc          |  5 ++-
 src/kudu/tablet/delta_tracker.cc      | 28 ++++++++---------
 src/kudu/tablet/diskrowset.cc         |  2 +-
 src/kudu/tablet/ops/write_op.cc       |  2 +-
 src/kudu/tablet/tablet.cc             | 26 ++++++++--------
 src/kudu/tablet/tablet.h              |  7 ++---
 src/kudu/tablet/txn_participant.cc    |  2 +-
 src/kudu/tools/rebalancer_tool.cc     |  6 ++--
 src/kudu/tserver/scanners.cc          | 16 +++++-----
 src/kudu/tserver/ts_tablet_manager.cc | 20 ++++++------
 src/kudu/tserver/ts_tablet_manager.h  |  5 ++-
 src/kudu/util/locks.h                 |  6 ++--
 src/kudu/util/rw_mutex-test.cc        | 10 +++---
 src/kudu/util/rw_semaphore-test.cc    |  3 +-
 src/kudu/util/thread.cc               |  6 ++--
 39 files changed, 191 insertions(+), 202 deletions(-)

diff --git a/src/kudu/client/meta_cache.cc b/src/kudu/client/meta_cache.cc
index 9183ced3d..25aca9b6b 100644
--- a/src/kudu/client/meta_cache.cc
+++ b/src/kudu/client/meta_cache.cc
@@ -1259,7 +1259,7 @@ bool MetaCache::LookupEntryByKeyFastPath(const KuduTable* 
table,
                                          const PartitionKey& partition_key,
                                          MetaCacheEntry* entry) {
   SCOPED_LOG_SLOW_EXECUTION(WARNING, 50, "looking up entry by key");
-  shared_lock<rw_spinlock> l(lock_.get_lock());
+  shared_lock l(lock_.get_lock());
   const TabletMap* tablets = FindOrNull(tablets_by_table_and_key_, 
table->id());
   if (PREDICT_FALSE(!tablets)) {
     // No cache available for this table.
@@ -1328,7 +1328,7 @@ Status MetaCache::DoFastPathLookup(const KuduTable* table,
 bool MetaCache::LookupEntryByIdFastPath(const string& tablet_id,
                                         MetaCacheEntry* entry) {
   SCOPED_LOG_SLOW_EXECUTION(WARNING, 50, "looking up entry by ID");
-  shared_lock<rw_spinlock> l(lock_.get_lock());
+  shared_lock l(lock_.get_lock());
   const auto* cache_entry = FindOrNull(entry_by_tablet_id_, tablet_id);
   if (PREDICT_FALSE(!cache_entry)) {
     return false;
@@ -1511,7 +1511,7 @@ void MetaCache::MarkTSFailed(RemoteTabletServer* ts,
   SCOPED_LOG_SLOW_EXECUTION(WARNING, 50, "marking tablet server as failed");
   const auto ts_status = status.CloneAndPrepend("TS failed");
 
-  shared_lock<rw_spinlock> l(lock_.get_lock());
+  shared_lock l(lock_.get_lock());
   // TODO(adar): replace with a ts->tablet multimap for faster lookup?
   for (const auto& tablet : tablets_by_id_) {
     // We just loop on all tablets; if a tablet does not have a replica on this
diff --git a/src/kudu/clock/builtin_ntp.cc b/src/kudu/clock/builtin_ntp.cc
index 1bfd026fc..b2f1073b6 100644
--- a/src/kudu/clock/builtin_ntp.cc
+++ b/src/kudu/clock/builtin_ntp.cc
@@ -33,7 +33,6 @@
 #include <ostream>
 #include <shared_mutex>
 #include <string>
-#include <type_traits>
 #include <utility>
 #include <vector>
 
@@ -400,7 +399,7 @@ class BuiltInNtp::ServerState {
   }
 
   MonoTime next_poll() const {
-    shared_lock<rw_spinlock> l(lock_);
+    shared_lock l(lock_);
     return next_poll_;
   }
 
@@ -415,7 +414,7 @@ class BuiltInNtp::ServerState {
   }
 
   const Sockaddr& cur_addr() const {
-    shared_lock<rw_spinlock> l(lock_);
+    shared_lock l(lock_);
     return addrs_[addr_idx_ % addrs_.size()];
   }
 
@@ -453,7 +452,7 @@ class BuiltInNtp::ServerState {
   }
 
   Status GetBestResponse(RecordedResponse* response) const {
-    shared_lock<rw_spinlock> l(lock_);
+    shared_lock l(lock_);
     // For now, just return the freshest response.
     // TODO(KUDU-2939): when the dispersion of the samples is being updated
     //                  over time, select the best sample among all available
@@ -496,7 +495,7 @@ class BuiltInNtp::ServerState {
 
   void DumpDiagnostics(string* diag) const {
     DCHECK(diag);
-    shared_lock<rw_spinlock> l(lock_);
+    shared_lock l(lock_);
     StrAppend(diag, "server ", host_.ToString(), ": ");
     auto addrs_list = JoinMapped(
         addrs_, [](const Sockaddr& addr) { return addr.ToString(); }, ",");
@@ -559,7 +558,7 @@ Status BuiltInNtp::Init() {
 Status BuiltInNtp::WalltimeWithError(uint64_t* now_usec, uint64_t* error_usec) 
{
   WalltimeSnapshot last;
   {
-    shared_lock<rw_spinlock> l(last_computed_lock_);
+    shared_lock l(last_computed_lock_);
     last = last_computed_;
   }
 
@@ -596,7 +595,7 @@ void BuiltInNtp::DumpDiagnostics(vector<string>* log) const 
{
   }
   WalltimeSnapshot last;
   {
-    shared_lock<rw_spinlock> l(last_computed_lock_);
+    shared_lock l(last_computed_lock_);
     last = last_computed_;
   }
   StrAppend(&diag, "is_synchronized=",
@@ -1156,12 +1155,12 @@ int64_t BuiltInNtp::LocalClockDeltaForMetrics() {
 }
 
 int64_t BuiltInNtp::WalltimeForMetrics() {
-  shared_lock<rw_spinlock> l(last_computed_lock_);
+  shared_lock l(last_computed_lock_);
   return last_computed_.wall;
 }
 
 int64_t BuiltInNtp::MaxErrorForMetrics() {
-  shared_lock<rw_spinlock> l(last_computed_lock_);
+  shared_lock l(last_computed_lock_);
   return last_computed_.error;
 }
 
diff --git a/src/kudu/common/generic_iterators.cc 
b/src/kudu/common/generic_iterators.cc
index 94ecf181a..2c8c9e2ec 100644
--- a/src/kudu/common/generic_iterators.cc
+++ b/src/kudu/common/generic_iterators.cc
@@ -907,7 +907,7 @@ const Schema& MergeIterator::schema() const {
 }
 
 void MergeIterator::GetIteratorStats(vector<IteratorStats>* stats) const {
-  shared_lock<rw_spinlock> l(states_lock_);
+  shared_lock l(states_lock_);
   CHECK(initted_);
   *stats = finished_iter_stats_by_col_;
 
@@ -1097,7 +1097,7 @@ string UnionIterator::ToString() const {
 
 void UnionIterator::GetIteratorStats(vector<IteratorStats>* stats) const {
   CHECK(initted_);
-  shared_lock<rw_spinlock> l(iters_lock_);
+  shared_lock l(iters_lock_);
   *stats = finished_iter_stats_by_col_;
   if (!iters_.empty()) {
     AddIterStats(*iters_.front().iter, stats);
diff --git a/src/kudu/consensus/log.cc b/src/kudu/consensus/log.cc
index 42c7cf773..f0073b44e 100644
--- a/src/kudu/consensus/log.cc
+++ b/src/kudu/consensus/log.cc
@@ -708,7 +708,7 @@ Status SegmentAllocator::SwitchToAllocatedSegment(
 
   // Set the new segment's schema.
   {
-    shared_lock<rw_spinlock> l(schema_lock_);
+    shared_lock l(schema_lock_);
     RETURN_NOT_OK(SchemaToPB(schema_, header.mutable_schema()));
     header.set_schema_version(schema_version_);
   }
@@ -1100,7 +1100,7 @@ int64_t Log::GetGCableDataSize(RetentionIndexes 
retention_indexes) const {
   CHECK_GE(retention_indexes.for_durability, 0);
   SegmentSequence segments_to_delete;
   {
-    shared_lock<rw_spinlock> l(state_lock_.get_lock());
+    shared_lock l(state_lock_.get_lock());
     CHECK_EQ(kLogWriting, log_state_);
     GetSegmentsToGCUnlocked(retention_indexes, &segments_to_delete);
   }
@@ -1115,7 +1115,7 @@ void Log::GetReplaySizeMap(std::map<int64_t, int64_t>* 
replay_size) const {
   replay_size->clear();
   SegmentSequence segments;
   {
-    shared_lock<rw_spinlock> l(state_lock_.get_lock());
+    shared_lock l(state_lock_.get_lock());
     CHECK_EQ(kLogWriting, log_state_);
     reader_->GetSegmentsSnapshot(&segments);
   }
@@ -1132,7 +1132,7 @@ void Log::GetReplaySizeMap(std::map<int64_t, int64_t>* 
replay_size) const {
 int64_t Log::OnDiskSize() {
   SegmentSequence segments;
   {
-    shared_lock<rw_spinlock> l(state_lock_.get_lock());
+    shared_lock l(state_lock_.get_lock());
     // If the log is closed, the tablet is either being deleted or tombstoned,
     // so we don't count the size of its log anymore as it should be deleted.
     if (log_state_ == kLogClosed) {
diff --git a/src/kudu/consensus/log.h b/src/kudu/consensus/log.h
index 76ed041a6..9e53d6512 100644
--- a/src/kudu/consensus/log.h
+++ b/src/kudu/consensus/log.h
@@ -24,9 +24,8 @@
 #include <limits>
 #include <map>
 #include <memory>
-#include <shared_mutex>
+#include <shared_mutex> // IWYU pragma: keep
 #include <string>
-#include <type_traits>
 #include <vector>
 
 #include <glog/logging.h>
@@ -57,6 +56,7 @@ class CompressionCodec;
 class FileCache;
 class FsManager;
 class RWFile;
+
 namespace consensus {
 class CommitMsg;
 }  // namespace consensus
@@ -164,7 +164,7 @@ class SegmentAllocator {
   FRIEND_TEST(LogTest, TestAutoStopIdleAppendThread);
   FRIEND_TEST(LogTest, TestWriteAndReadToAndFromInProgressSegment);
   SegmentAllocationState allocation_state() {
-    std::shared_lock<RWMutex> l(allocation_lock_);
+    std::shared_lock l(allocation_lock_);
     return allocation_state_;
   }
 
diff --git a/src/kudu/fs/data_dirs.cc b/src/kudu/fs/data_dirs.cc
index 33fe3bce6..da9278612 100644
--- a/src/kudu/fs/data_dirs.cc
+++ b/src/kudu/fs/data_dirs.cc
@@ -27,7 +27,6 @@
 #include <set>
 #include <shared_mutex>
 #include <string>
-#include <type_traits>
 #include <unordered_map>
 #include <unordered_set>
 #include <utility>
@@ -410,7 +409,7 @@ Status DataDirManager::CreateDataDirGroup(const string& 
tablet_id,
 
 Status DataDirManager::GetDirForBlock(const CreateBlockOptions& opts, Dir** 
dir,
                                       int* new_target_group_size) const {
-  shared_lock<rw_spinlock> lock(dir_group_lock_.get_lock());
+  shared_lock lock(dir_group_lock_.get_lock());
   vector<int> healthy_uuid_indices;
   const DataDirGroup* group = nullptr;
   DataDirGroup group_for_tests;
@@ -497,7 +496,7 @@ void DataDirManager::DeleteDataDirGroup(const std::string& 
tablet_id) {
 
 Status DataDirManager::GetDataDirGroupPB(const string& tablet_id,
                                          DataDirGroupPB* pb) const {
-  shared_lock<rw_spinlock> lock(dir_group_lock_.get_lock());
+  shared_lock lock(dir_group_lock_.get_lock());
   const DataDirGroup* group = FindOrNull(group_by_tablet_map_, tablet_id);
   if (group == nullptr) {
     return Status::NotFound(Substitute(
diff --git a/src/kudu/fs/dir_manager.cc b/src/kudu/fs/dir_manager.cc
index 368e32762..0ba1330c6 100644
--- a/src/kudu/fs/dir_manager.cc
+++ b/src/kudu/fs/dir_manager.cc
@@ -919,7 +919,7 @@ bool DirManager::FindUuidByRoot(const string& root, string* 
uuid) const {
 
 set<string> DirManager::FindTabletsByDirUuidIdx(int uuid_idx) const {
   DCHECK_LT(uuid_idx, dirs_.size());
-  shared_lock<rw_spinlock> lock(dir_group_lock_.get_lock());
+  shared_lock lock(dir_group_lock_.get_lock());
   const set<string>* tablet_set_ptr = FindOrNull(tablets_by_uuid_idx_map_, 
uuid_idx);
   if (tablet_set_ptr) {
     return *tablet_set_ptr;
@@ -958,7 +958,7 @@ Status DirManager::MarkDirFailed(int uuid_idx, const 
string& error_message) {
 
 bool DirManager::IsDirFailed(int uuid_idx) const {
   DCHECK_LT(uuid_idx, dirs_.size());
-  shared_lock<rw_spinlock> lock(dir_group_lock_.get_lock());
+  shared_lock lock(dir_group_lock_.get_lock());
   return ContainsKey(failed_dirs_, uuid_idx);
 }
 
diff --git a/src/kudu/fs/dir_manager.h b/src/kudu/fs/dir_manager.h
index 37aedb99d..94c7c21a0 100644
--- a/src/kudu/fs/dir_manager.h
+++ b/src/kudu/fs/dir_manager.h
@@ -25,9 +25,8 @@
 #include <optional>
 #endif
 #include <set>
-#include <shared_mutex>
+#include <shared_mutex> // IWYU pragma: keep
 #include <string>
-#include <type_traits>
 #include <unordered_map>
 #include <vector>
 
@@ -314,12 +313,12 @@ class DirManager {
   bool IsTabletInFailedDir(const std::string& tablet_id) const;
 
   std::set<int> GetFailedDirs() const {
-    std::shared_lock<rw_spinlock> group_lock(dir_group_lock_.get_lock());
+    std::shared_lock group_lock(dir_group_lock_.get_lock());
     return failed_dirs_;
   }
 
   bool AreAllDirsFailed() const {
-    std::shared_lock<rw_spinlock> group_lock(dir_group_lock_.get_lock());
+    std::shared_lock group_lock(dir_group_lock_.get_lock());
     return failed_dirs_.size() == dirs_.size();
   }
 
diff --git a/src/kudu/fs/fs_manager.cc b/src/kudu/fs/fs_manager.cc
index e2bc1958b..b8a293b46 100644
--- a/src/kudu/fs/fs_manager.cc
+++ b/src/kudu/fs/fs_manager.cc
@@ -24,7 +24,6 @@
 #include <iostream>
 #include <set>
 #include <shared_mutex>
-#include <type_traits>
 #include <unordered_map>
 #include <unordered_set>
 #include <utility>
@@ -740,7 +739,7 @@ Status FsManager::InitAndOpenBlockManager(FsReport* report,
 }
 
 void FsManager::CopyMetadata(unique_ptr<InstanceMetadataPB>* metadata) {
-  shared_lock<rw_spinlock> md_lock(metadata_rwlock_.get_lock());
+  shared_lock md_lock(metadata_rwlock_.get_lock());
   (*metadata)->CopyFrom(*metadata_);
 }
 
@@ -815,7 +814,7 @@ Status FsManager::AddTenantMetadata(const string& 
tenant_name,
                                     const string& tenant_key_version) {
   unique_ptr<InstanceMetadataPB> metadata(new InstanceMetadataPB);
   {
-    shared_lock<rw_spinlock> md_lock(metadata_rwlock_.get_lock());
+    shared_lock md_lock(metadata_rwlock_.get_lock());
     metadata->CopyFrom(*metadata_);
   }
   InstanceMetadataPB::TenantMetadataPB* tenant_metadata = 
metadata->add_tenants();
@@ -836,7 +835,7 @@ Status FsManager::RemoveTenantMetadata(const string& 
tenant_id) {
 
   unique_ptr<InstanceMetadataPB> metadata(new InstanceMetadataPB);
   {
-    shared_lock<rw_spinlock> md_lock(metadata_rwlock_.get_lock());
+    shared_lock md_lock(metadata_rwlock_.get_lock());
     metadata->CopyFrom(*metadata_);
   }
   for (int i = 0; i < metadata->tenants_size(); i++) {
@@ -1096,12 +1095,12 @@ bool FsManager::VertifyTenant(const std::string& 
tenant_id) const {
 }
 
 int32 FsManager::tenants_count() const {
-  shared_lock<rw_spinlock> md_lock(metadata_rwlock_.get_lock());
+  shared_lock md_lock(metadata_rwlock_.get_lock());
   return metadata_->tenants_size();
 }
 
 bool FsManager::is_tenants_exist() const {
-  shared_lock<rw_spinlock> md_lock(metadata_rwlock_.get_lock());
+  shared_lock md_lock(metadata_rwlock_.get_lock());
   return metadata_->tenants_size() > 0;
 }
 
@@ -1124,7 +1123,7 @@ const InstanceMetadataPB_TenantMetadataPB* 
FsManager::GetTenantUnlock(
 
 const InstanceMetadataPB_TenantMetadataPB* FsManager::GetTenant(
     const string& tenant_id) const {
-  shared_lock<rw_spinlock> md_lock(metadata_rwlock_.get_lock());
+  shared_lock md_lock(metadata_rwlock_.get_lock());
   return GetTenantUnlock(tenant_id);
 }
 
@@ -1238,7 +1237,7 @@ Status FsManager::RemoveTenant(const string& tenant_id) {
 
 vector<string> FsManager::GetAllTenants() const {
   vector<string> tenant_ids;
-  shared_lock<rw_spinlock> md_lock(metadata_rwlock_.get_lock());
+  shared_lock md_lock(metadata_rwlock_.get_lock());
   for (const auto& tdata : metadata_->tenants()) {
     tenant_ids.push_back(tdata.tenant_id());
   }
@@ -1464,7 +1463,7 @@ Status FsManager::SetEncryptionKeyUnlock(const string& 
tenant_id) {
 }
 
 Status FsManager::SetEncryptionKey(const string& tenant_id) {
-  shared_lock<rw_spinlock> md_lock(metadata_rwlock_.get_lock());
+  shared_lock md_lock(metadata_rwlock_.get_lock());
   return SetEncryptionKeyUnlock(tenant_id);
 }
 
diff --git a/src/kudu/fs/log_block_manager.cc b/src/kudu/fs/log_block_manager.cc
index cf610526b..2c7bc12c6 100644
--- a/src/kudu/fs/log_block_manager.cc
+++ b/src/kudu/fs/log_block_manager.cc
@@ -854,7 +854,7 @@ class LogBlockContainerNativeMeta final : public 
LogBlockContainer {
     }
 
     // Try lock before reading metadata offset, consider it not full if lock 
failed.
-    shared_lock<RWMutex> l(metadata_compact_lock_, std::try_to_lock);
+    shared_lock l(metadata_compact_lock_, std::try_to_lock);
     if (!l.owns_lock()) {
       return false;
     }
@@ -893,7 +893,7 @@ class LogBlockContainerNativeMeta final : public 
LogBlockContainer {
   }
 
   bool ShouldCompact() const {
-    shared_lock<RWMutex> l(metadata_compact_lock_);
+    shared_lock l(metadata_compact_lock_);
     return ShouldCompactUnlocked();
   }
 
@@ -1668,7 +1668,7 @@ Status 
LogBlockContainerNativeMeta::RemoveBlockIdsFromMetadata(
     deleted_block_ids->resize(deleted_count);
   });
 
-  shared_lock<RWMutex> l(metadata_compact_lock_);
+  shared_lock l(metadata_compact_lock_);
   for (const auto& r : records) {
     RETURN_NOT_OK_HANDLE_ERROR(metadata_file_->Append(r));
     ++deleted_count;
@@ -1692,7 +1692,7 @@ Status LogBlockContainerNativeMeta::AddBlockIdsToMetadata(
     records.emplace_back(record);
   }
 
-  shared_lock<RWMutex> l(metadata_compact_lock_);
+  shared_lock l(metadata_compact_lock_);
   for (const auto& r : records) {
     RETURN_NOT_OK_HANDLE_ERROR(metadata_file_->Append(r));
   }
@@ -1704,8 +1704,10 @@ Status LogBlockContainerNativeMeta::SyncMetadata() {
   VLOG(3) << "Syncing metadata file " << metadata_file_->filename();
   RETURN_NOT_OK_HANDLE_ERROR(read_only_status());
   if (FLAGS_enable_data_block_fsync) {
-    if (metrics_) metrics_->generic_metrics.total_disk_sync->Increment();
-    shared_lock<RWMutex> l(metadata_compact_lock_);
+    if (metrics_) {
+      metrics_->generic_metrics.total_disk_sync->Increment();
+    }
+    shared_lock l(metadata_compact_lock_);
     RETURN_NOT_OK_HANDLE_ERROR(metadata_file_->Sync());
   }
   return Status::OK();
diff --git a/src/kudu/master/catalog_manager.cc 
b/src/kudu/master/catalog_manager.cc
index 726d0662a..ef1ffdb47 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -1388,7 +1388,7 @@ Status CatalogManager::InitTokenSigner() {
 void CatalogManager::PrepareForLeadershipTask() {
   {
     // Hack to block this function until InitSysCatalogAsync() is finished.
-    shared_lock<LockType> l(lock_);
+    shared_lock l(lock_);
   }
   const RaftConsensus* consensus = sys_catalog_->tablet_replica()->consensus();
   const int64_t term_before_wait = consensus->CurrentTerm();
@@ -1740,7 +1740,7 @@ void CatalogManager::Shutdown() {
   // tasks for those entries.
   vector<scoped_refptr<TableInfo>> copy;
   {
-    shared_lock<LockType> l(lock_);
+    shared_lock l(lock_);
     AppendValuesFromMap(table_ids_map_, &copy);
   }
   AbortAndWaitForAllTasks(copy);
@@ -2380,7 +2380,7 @@ Status CatalogManager::FindLockAndAuthorizeTable(
   // Set to true if the client-provided table name and ID refer to different 
tables.
   scoped_refptr<TableInfo> table_with_mismatched_name;
   {
-    shared_lock<LockType> l(lock_);
+    shared_lock l(lock_);
     if (table_identifier.has_table_id()) {
       table = FindPtrOrNull(table_ids_map_, table_identifier.table_id());
 
@@ -4078,7 +4078,7 @@ Status CatalogManager::ListTables(const 
ListTablesRequestPB* req,
     if (req->has_show_soft_deleted()) {
       show_soft_deleted = req->show_soft_deleted();
     }
-    shared_lock<LockType> l(lock_);
+    shared_lock l(lock_);
     if (show_soft_deleted) {
       for (const auto& entry : soft_deleted_table_names_map_) {
         tables_info.emplace_back(entry.second);
@@ -4285,7 +4285,7 @@ bool CatalogManager::IsTableWriteDisabled(const 
scoped_refptr<TableInfo>& table,
 Status CatalogManager::GetTableInfo(const string& table_id, 
scoped_refptr<TableInfo> *table) {
   leader_lock_.AssertAcquiredForReading();
 
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   *table = FindPtrOrNull(table_ids_map_, table_id);
   return Status::OK();
 }
@@ -4294,7 +4294,7 @@ void CatalogManager::GetTableInfoByName(const string& 
table_name,
                                         scoped_refptr<TableInfo> *table) {
   leader_lock_.AssertAcquiredForReading();
 
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   *table = FindPtrOrNull(normalized_table_names_map_, table_name);
 }
 
@@ -4302,7 +4302,7 @@ void 
CatalogManager::GetAllTables(vector<scoped_refptr<TableInfo>>* tables) {
   leader_lock_.AssertAcquiredForReading();
 
   tables->clear();
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   AppendValuesFromMap(table_ids_map_, tables);
 }
 
@@ -4310,7 +4310,7 @@ void 
CatalogManager::GetNormalizedTables(vector<scoped_refptr<TableInfo>>* table
   leader_lock_.AssertAcquiredForReading();
 
   tables->clear();
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   AppendValuesFromMap(normalized_table_names_map_, tables);
 }
 
@@ -4318,14 +4318,14 @@ void 
CatalogManager::GetAllTabletsForTests(vector<scoped_refptr<TabletInfo>>* ta
   leader_lock_.AssertAcquiredForReading();
 
   tablets->clear();
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   AppendValuesFromMap(tablet_map_, tablets);
 }
 
 Status CatalogManager::TableNameExists(const string& table_name, bool* exists) 
{
   leader_lock_.AssertAcquiredForReading();
 
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   scoped_refptr<TableInfo> table = FindTableWithNameUnlocked(table_name);
   *exists = (table != nullptr);
   return Status::OK();
@@ -4364,7 +4364,7 @@ Status CatalogManager::GetTabletReplica(const string& 
tablet_id,
                                         scoped_refptr<TabletReplica>* replica) 
const {
   // Note: CatalogManager has only one table, 'sys_catalog', with only
   // one tablet.
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   if (!sys_catalog_) {
     return Status::ServiceUnavailable("Systable not yet initialized");
   }
@@ -4380,7 +4380,7 @@ Status CatalogManager::GetTabletReplica(const string& 
tablet_id,
 void CatalogManager::GetTabletReplicas(vector<scoped_refptr<TabletReplica>>* 
replicas) const {
   // Note: CatalogManager has only one table, 'sys_catalog', with only
   // one tablet.
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   if (!sys_catalog_) {
     return;
   }
@@ -5336,7 +5336,7 @@ Status CatalogManager::ProcessTabletReport(
     // We only need to acquire lock_ for the tablet_map_ access, but since it's
     // acquired exclusively so rarely, it's probably cheaper to acquire and
     // hold it for all tablets here than to acquire/release it for each tablet.
-    shared_lock<LockType> l(lock_);
+    shared_lock l(lock_);
     for (const ReportedTabletPB& report : full_report.updated_tablets()) {
       const string& tablet_id = report.tablet_id();
 
@@ -5769,7 +5769,7 @@ std::shared_ptr<RaftConsensus> 
CatalogManager::master_consensus() const {
   // CatalogManager::InitSysCatalogAsync takes lock_ in exclusive mode in order
   // to initialize sys_catalog_, so it's sufficient to take lock_ in shared 
mode
   // here to protect access to sys_catalog_.
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   if (!sys_catalog_) {
     return nullptr;
   }
@@ -5826,9 +5826,9 @@ void CatalogManager::SendDeleteTabletRequest(const 
scoped_refptr<TabletInfo>& ta
 void CatalogManager::ExtractTabletsToProcess(
     vector<scoped_refptr<TabletInfo>>* tablets_to_process) {
 
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
 
-  // TODO: At the moment we loop through all the tablets
+  // TODO(matteo.bertozzi): At the moment we loop through all the tablets
   //       we can keep a set of tablets waiting for "assignment"
   //       or just a counter to avoid to take the lock and loop through the 
tablets
   //       if everything is "stable".
@@ -5858,7 +5858,7 @@ void CatalogManager::ExtractTabletsToProcess(
 void CatalogManager::ExtractDeletedTablesAndTablets(
     vector<scoped_refptr<TableInfo>>* deleted_tables,
     vector<scoped_refptr<TabletInfo>>* deleted_tablets) {
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   for (const auto& table_entry : table_ids_map_) {
     scoped_refptr<TableInfo> table = table_entry.second;
     TableMetadataLock table_lock(table.get(), LockMode::READ);
@@ -6489,7 +6489,7 @@ Status CatalogManager::GetTabletLocations(const string& 
tablet_id,
   locs_pb->mutable_interned_replicas()->Clear();
   scoped_refptr<TabletInfo> tablet_info;
   {
-    shared_lock<LockType> l(lock_);
+    shared_lock l(lock_);
     // It's OK to return NOT_FOUND back to the client, even with authorization 
enabled,
     // because tablet IDs are randomly generated and don't carry user data.
     if (!FindCopy(tablet_map_, tablet_id, &tablet_info)) {
@@ -6516,7 +6516,7 @@ Status CatalogManager::ReplaceTablet(const string& 
tablet_id, ReplaceTabletRespo
   // Lookup the tablet-to-be-replaced and get its table.
   scoped_refptr<TabletInfo> old_tablet;
   {
-    shared_lock<LockType> l(lock_);
+    shared_lock l(lock_);
     if (!FindCopy(tablet_map_, tablet_id, &old_tablet)) {
       return Status::NotFound(Substitute("Unknown tablet $0", tablet_id));
     }
@@ -6716,7 +6716,7 @@ void CatalogManager::DumpState(std::ostream* out) const {
   // Copy the internal state so that, if the output stream blocks,
   // we don't end up holding the lock for a long time.
   {
-    shared_lock<LockType> l(lock_);
+    shared_lock l(lock_);
     ids_copy = table_ids_map_;
     names_copy = normalized_table_names_map_;
     tablets_copy = tablet_map_;
@@ -7104,7 +7104,7 @@ Status CatalogManager::GetTableStates(const 
TableIdentifierPB& table_identifier,
   scoped_refptr<TableInfo> table_info;
   *is_soft_deleted_table = false;
   // Confirm the table really exists in the system catalog.
-  shared_lock<LockType> l(lock_);
+  shared_lock l(lock_);
   scoped_refptr<TableInfo> table_by_name;
   scoped_refptr<TableInfo> table_by_id;
   if (table_identifier.has_table_name()) {
@@ -7474,7 +7474,7 @@ Status TableInfo::GetTabletsInRange(
     has_key_end = true;
   }
 
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   RawTabletInfoMap::const_iterator it;
   if (has_key_start) {
     it = tablet_map_.upper_bound(partition_key_start);
@@ -7502,7 +7502,7 @@ Status TableInfo::GetTabletsInRange(
 }
 
 bool TableInfo::IsAlterInProgress(uint32_t version) const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   auto it = schema_version_counts_.begin();
   if (it == schema_version_counts_.end()) {
     // The table has no tablets.
@@ -7518,7 +7518,7 @@ bool TableInfo::IsAlterInProgress(uint32_t version) const 
{
 }
 
 bool TableInfo::IsCreateInProgress() const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   for (const auto& e : tablet_map_) {
     TabletMetadataLock tablet_lock(e.second, LockMode::READ);
     if (!tablet_lock.data().is_running()) {
@@ -7545,7 +7545,7 @@ void TableInfo::RemoveTask(const string& tablet_id, 
MonitoredTask* task) {
 }
 
 void TableInfo::AbortTasks() {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   for (auto& task : pending_tasks_) {
     task.second->Abort();
   }
@@ -7555,7 +7555,7 @@ void TableInfo::WaitTasksCompletion() {
   int wait_time = 5;
   while (1) {
     {
-      shared_lock<rw_spinlock> l(lock_);
+      shared_lock l(lock_);
       if (pending_tasks_.empty()) {
         break;
       }
@@ -7566,7 +7566,7 @@ void TableInfo::WaitTasksCompletion() {
 }
 
 bool TableInfo::ContainsTask(const string& tablet_id, const string& 
task_description) {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   auto range = pending_tasks_.equal_range(tablet_id);
   for (auto it = range.first; it != range.second; ++it) {
     if (it->second->description() == task_description) {
@@ -7579,7 +7579,7 @@ bool TableInfo::ContainsTask(const string& tablet_id, 
const string& task_descrip
 void TableInfo::GetTaskList(vector<scoped_refptr<MonitoredTask>>* tasks) {
   tasks->clear();
   {
-    shared_lock<rw_spinlock> l(lock_);
+    shared_lock l(lock_);
     for (const auto& task : pending_tasks_) {
       tasks->push_back(task.second);
     }
@@ -7588,7 +7588,7 @@ void 
TableInfo::GetTaskList(vector<scoped_refptr<MonitoredTask>>* tasks) {
 
 void TableInfo::GetAllTablets(vector<scoped_refptr<TabletInfo>>* ret) const {
   ret->clear();
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   for (const auto& e : tablet_map_) {
     ret->emplace_back(make_scoped_refptr(e.second));
   }
diff --git a/src/kudu/master/catalog_manager.h 
b/src/kudu/master/catalog_manager.h
index 37d59ed22..5acfd526c 100644
--- a/src/kudu/master/catalog_manager.h
+++ b/src/kudu/master/catalog_manager.h
@@ -29,7 +29,6 @@
 #include <set>
 #include <shared_mutex>
 #include <string>
-#include <type_traits>
 #include <unordered_map>
 #include <unordered_set>
 #include <utility>
@@ -393,7 +392,7 @@ class TableInfo : public RefCountedThreadSafe<TableInfo> {
 
   // Returns a snapshot copy of the table info's tablet map.
   TabletInfoMap tablet_map() const {
-    std::shared_lock<rw_spinlock> l(lock_);
+    std::shared_lock l(lock_);
     TabletInfoMap ret;
     for (const auto& e : tablet_map_) {
       ret.emplace(e.first, make_scoped_refptr(e.second));
@@ -403,7 +402,7 @@ class TableInfo : public RefCountedThreadSafe<TableInfo> {
 
   // Returns the number of tablets.
   int num_tablets() const {
-    std::shared_lock<rw_spinlock> l(lock_);
+    std::shared_lock l(lock_);
     return tablet_map_.size();
   }
 
diff --git a/src/kudu/master/location_cache.cc 
b/src/kudu/master/location_cache.cc
index 29f4212f4..8dd6f4e3c 100644
--- a/src/kudu/master/location_cache.cc
+++ b/src/kudu/master/location_cache.cc
@@ -20,7 +20,6 @@
 #include <cstdio>
 #include <shared_mutex>
 #include <string>
-#include <type_traits>
 #include <unordered_map>
 #include <utility>
 #include <vector>
@@ -95,7 +94,7 @@ Status LocationCache::GetLocation(const string& key, string* 
location) {
   }
   {
     // First check whether the location for the key has already been assigned.
-    shared_lock<rw_spinlock> l(location_map_lock_);
+    shared_lock l(location_map_lock_);
     const auto* value_ptr = FindOrNull(location_map_, key);
     if (value_ptr) {
       DCHECK(!value_ptr->empty());
diff --git a/src/kudu/master/ts_descriptor.cc b/src/kudu/master/ts_descriptor.cc
index 96a33e76b..3614cc97e 100644
--- a/src/kudu/master/ts_descriptor.cc
+++ b/src/kudu/master/ts_descriptor.cc
@@ -160,7 +160,7 @@ void TSDescriptor::UpdateHeartbeatTime() {
 
 MonoDelta TSDescriptor::TimeSinceHeartbeat() const {
   MonoTime now(MonoTime::Now());
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   return now - last_heartbeat_;
 }
 
@@ -170,7 +170,7 @@ void TSDescriptor::UpdateNeedsFullTabletReport(bool 
needs_report) {
 }
 
 bool TSDescriptor::needs_full_report() const  {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   return needs_full_report_;
 }
 
@@ -179,7 +179,7 @@ bool TSDescriptor::PresumedDead() const {
 }
 
 int64_t TSDescriptor::latest_seqno() const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   return latest_seqno_;
 }
 
@@ -303,7 +303,7 @@ double TSDescriptor::RecentReplicaCreationsByTable(const 
string& table_id) {
 
 Status TSDescriptor::GetRegistration(ServerRegistrationPB* reg,
                                      bool use_external_addr) const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   CHECK(registration_) << "No registration";
   CHECK_NOTNULL(reg)->CopyFrom(*registration_);
 
@@ -334,7 +334,7 @@ Status TSDescriptor::GetRegistration(ServerRegistrationPB* 
reg,
 }
 
 void TSDescriptor::GetTSInfoPB(TSInfoPB* tsinfo_pb, bool use_external_addr) 
const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   CHECK(registration_);
   const auto& reg = *registration_;
   tsinfo_pb->mutable_rpc_addresses()->CopyFrom(
@@ -348,7 +348,7 @@ void TSDescriptor::GetTSInfoPB(TSInfoPB* tsinfo_pb, bool 
use_external_addr) cons
 }
 
 void TSDescriptor::GetNodeInstancePB(NodeInstancePB* instance_pb) const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   instance_pb->set_permanent_uuid(permanent_uuid_);
   instance_pb->set_instance_seqno(latest_seqno_);
 }
@@ -356,7 +356,7 @@ void TSDescriptor::GetNodeInstancePB(NodeInstancePB* 
instance_pb) const {
 Status TSDescriptor::ResolveSockaddr(Sockaddr* addr, string* host) const {
   vector<HostPort> hostports;
   {
-    shared_lock<rw_spinlock> l(lock_);
+    shared_lock l(lock_);
     for (const HostPortPB& addr : registration_->rpc_addresses()) {
       hostports.emplace_back(addr.host(), addr.port());
     }
@@ -391,7 +391,7 @@ Status TSDescriptor::ResolveSockaddr(Sockaddr* addr, 
string* host) const {
 Status TSDescriptor::GetTSAdminProxy(const shared_ptr<rpc::Messenger>& 
messenger,
                                      
shared_ptr<tserver::TabletServerAdminServiceProxy>* proxy) {
   {
-    shared_lock<rw_spinlock> l(lock_);
+    shared_lock l(lock_);
     if (ts_admin_proxy_) {
       *proxy = ts_admin_proxy_;
       return Status::OK();
@@ -416,7 +416,7 @@ Status TSDescriptor::GetTSAdminProxy(const 
shared_ptr<rpc::Messenger>& messenger
 Status TSDescriptor::GetConsensusProxy(const shared_ptr<rpc::Messenger>& 
messenger,
                                        
shared_ptr<consensus::ConsensusServiceProxy>* proxy) {
   {
-    shared_lock<rw_spinlock> l(lock_);
+    shared_lock l(lock_);
     if (consensus_proxy_) {
       *proxy = consensus_proxy_;
       return Status::OK();
@@ -439,7 +439,7 @@ Status TSDescriptor::GetConsensusProxy(const 
shared_ptr<rpc::Messenger>& messeng
 }
 
 string TSDescriptor::ToString() const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   CHECK(!registration_->rpc_addresses().empty());
   const auto& addr = registration_->rpc_addresses(0);
   return Substitute("$0 ($1:$2)", permanent_uuid_, addr.host(), addr.port());
diff --git a/src/kudu/master/ts_descriptor.h b/src/kudu/master/ts_descriptor.h
index 6ead59d5d..427e8379d 100644
--- a/src/kudu/master/ts_descriptor.h
+++ b/src/kudu/master/ts_descriptor.h
@@ -19,7 +19,7 @@
 #include <cstdint>
 #include <memory>
 #include <optional>
-#include <shared_mutex>
+#include <shared_mutex> // IWYU pragma: keep
 #include <string>
 #include <type_traits>  // IWYU pragma: keep
 #include <unordered_map>
@@ -206,7 +206,7 @@ class TSDescriptor : public 
enable_make_shared<TSDescriptor> {
   // If dimension is none, return the total number of replicas in the tablet 
server.
   // Otherwise, return the number of replicas in the dimension.
   int num_live_replicas(const std::optional<std::string>& dimension = 
std::nullopt) const {
-    std::shared_lock<rw_spinlock> l(lock_);
+    std::shared_lock l(lock_);
     if (dimension) {
       int32_t num_live_tablets = 0;
       if (num_live_tablets_by_dimension_) {
@@ -220,7 +220,7 @@ class TSDescriptor : public 
enable_make_shared<TSDescriptor> {
   // Return the number of live replicas (i.e. running or bootstrapping)
   // in the given range for the given table.
   int num_live_replicas_by_range(const std::string& range_key, const 
std::string& table_id) const {
-    std::shared_lock<rw_spinlock> l(lock_);
+    std::shared_lock l(lock_);
     int32_t num_live_tablets_by_range = 0;
     if (const auto* ranges = FindOrNull(
           num_live_tablets_by_range_per_table_, table_id); ranges != nullptr) {
@@ -231,7 +231,7 @@ class TSDescriptor : public 
enable_make_shared<TSDescriptor> {
 
   // Return the number of live replicas (i.e. running or bootstrapping) in the 
given table.
   int num_live_replicas_by_table(const std::string& table_id) const {
-    std::shared_lock<rw_spinlock> l(lock_);
+    std::shared_lock l(lock_);
     int32_t num_live_tablets_by_table = 0;
     if (ContainsKey(num_live_tablets_by_range_per_table_, table_id)) {
       auto ranges = FindOrDie(num_live_tablets_by_range_per_table_, table_id);
@@ -246,7 +246,7 @@ class TSDescriptor : public 
enable_make_shared<TSDescriptor> {
   // since the location could change at any time if the tablet server
   // re-registers.
   std::optional<std::string> location() const {
-    std::shared_lock<rw_spinlock> l(lock_);
+    std::shared_lock l(lock_);
     return location_;
   }
 
diff --git a/src/kudu/master/ts_manager.cc b/src/kudu/master/ts_manager.cc
index a27e081f9..e4754e957 100644
--- a/src/kudu/master/ts_manager.cc
+++ b/src/kudu/master/ts_manager.cc
@@ -109,7 +109,7 @@ TSManager::~TSManager() {
 
 Status TSManager::LookupTS(const NodeInstancePB& instance,
                            shared_ptr<TSDescriptor>* ts_desc) const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   const shared_ptr<TSDescriptor>* found_ptr =
     FindOrNull(servers_by_id_, instance.permanent_uuid());
   if (!found_ptr) {
@@ -129,7 +129,7 @@ Status TSManager::LookupTS(const NodeInstancePB& instance,
 
 bool TSManager::LookupTSByUUID(const string& uuid,
                                std::shared_ptr<TSDescriptor>* ts_desc) const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   return FindCopy(servers_by_id_, uuid, ts_desc);
 }
 
@@ -201,17 +201,17 @@ Status TSManager::RegisterTS(const NodeInstancePB& 
instance,
 
 void TSManager::GetAllDescriptors(TSDescriptorVector* descs) const {
   descs->clear();
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   AppendValuesFromMap(servers_by_id_, descs);
 }
 
 int TSManager::GetCount() const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   return servers_by_id_.size();
 }
 
 int TSManager::GetLiveCount() const {
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   int live_count = 0;
   for (const auto& entry : servers_by_id_) {
     const shared_ptr<TSDescriptor>& ts = entry.second;
@@ -224,7 +224,7 @@ int TSManager::GetLiveCount() const {
 
 unordered_set<string> TSManager::GetUuidsToIgnoreForUnderreplication() const {
   unordered_set<string> uuids;
-  shared_lock<RWMutex> tsl(ts_state_lock_);
+  shared_lock tsl(ts_state_lock_);
   uuids.reserve(ts_state_by_uuid_.size());
   for (const auto& ts_and_state_timestamp : ts_state_by_uuid_) {
     if (ts_and_state_timestamp.second.first == 
TServerStatePB::MAINTENANCE_MODE) {
@@ -235,14 +235,14 @@ unordered_set<string> 
TSManager::GetUuidsToIgnoreForUnderreplication() const {
 }
 
 TServerStateMap TSManager::GetTServerStates() const {
-  shared_lock<RWMutex> tsl(ts_state_lock_);
+  shared_lock tsl(ts_state_lock_);
   return ts_state_by_uuid_;
 }
 
 void TSManager::GetDescriptorsAvailableForPlacement(TSDescriptorVector* descs) 
const {
   descs->clear();
-  shared_lock<RWMutex> tsl(ts_state_lock_);
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock tsl(ts_state_lock_);
+  shared_lock l(lock_);
   descs->reserve(servers_by_id_.size());
   for (const TSDescriptorMap::value_type& entry : servers_by_id_) {
     const shared_ptr<TSDescriptor>& ts = entry.second;
@@ -301,7 +301,7 @@ TServerStatePB TSManager::GetTServerStateUnlocked(const 
string& ts_uuid) const {
 }
 
 TServerStatePB TSManager::GetTServerState(const string& ts_uuid) const {
-  shared_lock<RWMutex> l(ts_state_lock_);
+  shared_lock l(ts_state_lock_);
   return GetTServerStateUnlocked(ts_uuid);
 }
 
@@ -337,7 +337,7 @@ Status TSManager::UnregisterTServer(const std::string& 
ts_uuid,
 int TSManager::ClusterSkew() const {
   int min_count = std::numeric_limits<int>::max();
   int max_count = 0;
-  shared_lock<rw_spinlock> l(lock_);
+  shared_lock l(lock_);
   for (const TSDescriptorMap::value_type& entry : servers_by_id_) {
     const shared_ptr<TSDescriptor>& ts = entry.second;
     if (ts->PresumedDead()) {
diff --git a/src/kudu/rpc/messenger.cc b/src/kudu/rpc/messenger.cc
index 15db7d09c..94f14cef4 100644
--- a/src/kudu/rpc/messenger.cc
+++ b/src/kudu/rpc/messenger.cc
@@ -335,7 +335,7 @@ void Messenger::QueueInboundCall(unique_ptr<InboundCall> 
call) {
   // blocking operation and QueueInboundCall is called by the reactor thread.
   //
   // See KUDU-2946 for more details.
-  shared_lock<rw_spinlock> guard(lock_.get_lock());
+  shared_lock guard(lock_.get_lock());
   scoped_refptr<RpcService>* service = FindOrNull(rpc_services_,
                                                   
call->remote_method().service_name());
   if (PREDICT_FALSE(!service)) {
@@ -452,7 +452,7 @@ void Messenger::ScheduleOnReactor(std::function<void(const 
Status&)> func,
 const scoped_refptr<RpcService> Messenger::rpc_service(const string& 
service_name) const {
   scoped_refptr<RpcService> service;
   {
-    shared_lock<rw_spinlock> guard(lock_.get_lock());
+    shared_lock guard(lock_.get_lock());
     if (!FindCopy(rpc_services_, service_name, &service)) {
       return scoped_refptr<RpcService>(nullptr);
     }
diff --git a/src/kudu/rpc/messenger.h b/src/kudu/rpc/messenger.h
index 74de24bbb..c7fe866d7 100644
--- a/src/kudu/rpc/messenger.h
+++ b/src/kudu/rpc/messenger.h
@@ -20,9 +20,8 @@
 #include <functional>
 #include <memory>
 #include <optional>
-#include <shared_mutex>
+#include <shared_mutex> // IWYU pragma: keep
 #include <string>
-#include <type_traits>
 #include <unordered_map>
 #include <utility>
 #include <vector>
@@ -445,7 +444,7 @@ class Messenger {
   }
 
   bool closing() const {
-    std::shared_lock<rw_spinlock> l(lock_.get_lock());
+    std::shared_lock l(lock_.get_lock());
     return state_ == kClosing;
   }
 
diff --git a/src/kudu/rpc/result_tracker.h b/src/kudu/rpc/result_tracker.h
index 79d43c136..f131b0bfb 100644
--- a/src/kudu/rpc/result_tracker.h
+++ b/src/kudu/rpc/result_tracker.h
@@ -133,7 +133,7 @@ class RpcContext;
 // in pseudo-ish code):
 //
 // {
-//   lock_guard<simple_spinlock> l(lock_);
+//   lock_guard l(lock_);
 //   if (follower_op) {
 //     result_tracker_->TrackRpcOrChangeDriver(request_id);
 //     continue_with_op();
diff --git a/src/kudu/rpc/rpcz_store.cc b/src/kudu/rpc/rpcz_store.cc
index 6e46b3492..97e28b2f6 100644
--- a/src/kudu/rpc/rpcz_store.cc
+++ b/src/kudu/rpc/rpcz_store.cc
@@ -135,7 +135,7 @@ MethodSampler* RpczStore::SamplerForCall(InboundCall* call) 
{
   // Most likely, we already have a sampler created for the call once received
   // the very first call for a particular method of an RPC interface.
   {
-    shared_lock<rw_spinlock> l(samplers_lock_.get_lock());
+    shared_lock l(samplers_lock_.get_lock());
     auto it = method_samplers_.find(method_info);
     if (PREDICT_TRUE(it != method_samplers_.end())) {
       return it->second.get();
@@ -233,7 +233,7 @@ void RpczStore::DumpPB(const DumpRpczStoreRequestPB& /* req 
*/,
                        DumpRpczStoreResponsePB* resp) {
   vector<pair<const RpcMethodInfo*, MethodSampler*>> samplers;
   {
-    shared_lock<rw_spinlock> l(samplers_lock_.get_lock());
+    shared_lock l(samplers_lock_.get_lock());
     for (const auto& [mi, ms] : method_samplers_) {
       samplers.emplace_back(mi, ms.get());
     }
diff --git a/src/kudu/security/tls_context.cc b/src/kudu/security/tls_context.cc
index 86017e16d..73b4a44ed 100644
--- a/src/kudu/security/tls_context.cc
+++ b/src/kudu/security/tls_context.cc
@@ -429,7 +429,7 @@ Status TlsContext::AddTrustedCertificate(const Cert& cert) {
 
 Status TlsContext::DumpTrustedCerts(vector<string>* cert_ders) const {
   SCOPED_OPENSSL_NO_PENDING_ERRORS;
-  shared_lock<RWMutex> lock(lock_);
+  shared_lock lock(lock_);
 
   vector<string> ret;
   auto* cert_store = SSL_CTX_get_cert_store(ctx_.get());
@@ -541,7 +541,7 @@ Status TlsContext::GenerateSelfSignedCertAndKey() {
 
 optional<CertSignRequest> TlsContext::GetCsrIfNecessary() const {
   SCOPED_OPENSSL_NO_PENDING_ERRORS;
-  shared_lock<RWMutex> lock(lock_);
+  shared_lock lock(lock_);
   if (csr_) {
     return csr_->Clone();
   }
@@ -629,7 +629,7 @@ Status TlsContext::InitiateHandshake(TlsHandshake* 
handshake) const {
   {
     // This lock is to protect against concurrent change of certificates
     // while calling SSL_new() here.
-    shared_lock<RWMutex> lock(lock_);
+    shared_lock lock(lock_);
     ssl = ssl_make_unique(SSL_new(ctx_.get()));
   }
   if (!ssl) {
diff --git a/src/kudu/security/tls_context.h b/src/kudu/security/tls_context.h
index 4606df566..c00615b7c 100644
--- a/src/kudu/security/tls_context.h
+++ b/src/kudu/security/tls_context.h
@@ -19,9 +19,8 @@
 
 #include <cstdint>
 #include <optional>
-#include <shared_mutex>
+#include <shared_mutex> // IWYU pragma: keep
 #include <string>
-#include <type_traits>
 #include <vector>
 
 #include "kudu/gutil/port.h"
@@ -90,7 +89,7 @@ class TlsContext {
   // Returns true if this TlsContext has been configured with a cert and key 
for
   // use with TLS-encrypted connections.
   bool has_cert() const {
-    std::shared_lock<RWMutex> lock(lock_);
+    std::shared_lock lock(lock_);
     return has_cert_;
   }
 
@@ -98,13 +97,13 @@ class TlsContext {
   // cert and key for use with TLS-encrypted connections. If this method 
returns
   // true, then 'has_trusted_cert' will also return true.
   bool has_signed_cert() const {
-    std::shared_lock<RWMutex> lock(lock_);
+    std::shared_lock lock(lock_);
     return has_cert_ && !csr_;
   }
 
   // Returns true if this TlsContext has at least one certificate in its trust 
store.
   bool has_trusted_cert() const {
-    std::shared_lock<RWMutex> lock(lock_);
+    std::shared_lock lock(lock_);
     return trusted_cert_count_ > 0;
   }
 
@@ -175,7 +174,7 @@ class TlsContext {
   // Return the number of certs that have been marked as trusted.
   // Used by tests.
   int trusted_cert_count_for_tests() const {
-    std::shared_lock<RWMutex> lock(lock_);
+    std::shared_lock lock(lock_);
     return trusted_cert_count_;
   }
 
diff --git a/src/kudu/security/token_signer.cc 
b/src/kudu/security/token_signer.cc
index 692b6fa15..319e76e75 100644
--- a/src/kudu/security/token_signer.cc
+++ b/src/kudu/security/token_signer.cc
@@ -186,7 +186,7 @@ Status TokenSigner::GenerateAuthnToken(string username,
 
 Status TokenSigner::SignToken(SignedTokenPB* token) const {
   CHECK(token);
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   if (tsk_deque_.empty()) {
     return Status::IllegalState("no token signing key");
   }
@@ -196,7 +196,7 @@ Status TokenSigner::SignToken(SignedTokenPB* token) const {
 }
 
 bool TokenSigner::IsCurrentKeyValid() const {
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   if (tsk_deque_.empty()) {
     return false;
   }
diff --git a/src/kudu/security/token_verifier.cc 
b/src/kudu/security/token_verifier.cc
index ada36d722..b93ec0375 100644
--- a/src/kudu/security/token_verifier.cc
+++ b/src/kudu/security/token_verifier.cc
@@ -23,7 +23,6 @@
 #include <ostream>
 #include <shared_mutex>
 #include <string>
-#include <type_traits>
 #include <utility>
 #include <vector>
 
@@ -55,7 +54,7 @@ TokenVerifier::~TokenVerifier() {
 }
 
 int64_t TokenVerifier::GetMaxKnownKeySequenceNumber() const {
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   if (keys_by_seq_.empty()) {
     return -1;
   }
@@ -97,7 +96,7 @@ Status TokenVerifier::ImportKeys(const 
vector<TokenSigningPublicKeyPB>& keys) {
 std::vector<TokenSigningPublicKeyPB> TokenVerifier::ExportKeys(
     int64_t after_sequence_number) const {
   vector<TokenSigningPublicKeyPB> ret;
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   ret.reserve(keys_by_seq_.size());
   transform(keys_by_seq_.upper_bound(after_sequence_number),
             keys_by_seq_.end(),
@@ -136,7 +135,7 @@ TokenVerificationResult TokenVerifier::VerifyTokenSignature(
   }
 
   {
-    shared_lock<RWMutex> l(lock_);
+    shared_lock l(lock_);
     auto* tsk = FindPointeeOrNull(keys_by_seq_, 
signed_token.signing_key_seq_num());
     if (!tsk) {
       return TokenVerificationResult::UNKNOWN_SIGNING_KEY;
diff --git a/src/kudu/server/webserver.cc b/src/kudu/server/webserver.cc
index ab5f3f149..ecf15fc00 100644
--- a/src/kudu/server/webserver.cc
+++ b/src/kudu/server/webserver.cc
@@ -35,7 +35,6 @@
 #include <shared_mutex>
 #include <sstream>
 #include <string>
-#include <type_traits>
 #include <unordered_map>
 #include <unordered_set>
 #include <utility>
@@ -602,7 +601,7 @@ sq_callback_result_t Webserver::BeginRequestCallback(
       SendResponse(connection, &resp);
       return SQ_HANDLED_OK;
     }
-    shared_lock<RWMutex> l(lock_);
+    shared_lock l(lock_);
     PathHandlerMap::const_iterator it = path_handlers_.find(request_info->uri);
 
     if (it == path_handlers_.end()) {
@@ -1023,7 +1022,7 @@ void Webserver::RenderMainTemplate(
   std::vector<pair<string, PathHandler*>> paths_and_handlers;
 
   {
-    shared_lock<RWMutex> l(lock_);
+    shared_lock l(lock_);
     ej["footer_html"] = footer_html_;
     paths_and_handlers.reserve(path_handlers_.size());
     for (const auto& [path, handler] : path_handlers_) {
diff --git a/src/kudu/tablet/delta_tracker.cc b/src/kudu/tablet/delta_tracker.cc
index 469d6288c..06b8c81d5 100644
--- a/src/kudu/tablet/delta_tracker.cc
+++ b/src/kudu/tablet/delta_tracker.cc
@@ -730,7 +730,7 @@ Status DeltaTracker::Update(Timestamp timestamp,
     }
 
     // TODO(todd): can probably lock this more fine-grained.
-    shared_lock<rw_spinlock> lock(component_lock_);
+    shared_lock lock(component_lock_);
 
     // Should check dms_exists_ here again since there is a gap
     // between the two critical sections defined by component_lock_.
@@ -751,7 +751,7 @@ Status DeltaTracker::Update(Timestamp timestamp,
 
 Status DeltaTracker::CheckRowDeleted(rowid_t row_idx, const IOContext* 
io_context,
                                      bool* deleted, ProbeStats* stats) const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
 
   *deleted = false;
   // Check if the row has a deletion in DeltaMemStore.
@@ -905,7 +905,7 @@ bool DeltaTracker::GetDeltaMemStoreInfo(size_t* size_bytes, 
MonoTime* creation_t
   // component_lock_. We need to check again after taking the lock in case we
   // raced with a DMS flush.
   if (dms_exists_) {
-    shared_lock<rw_spinlock> lock(component_lock_);
+    shared_lock lock(component_lock_);
     if (dms_exists_) {
       *size_bytes = dms_->EstimateSize();
       *creation_time = dms_->creation_time();
@@ -916,27 +916,27 @@ bool DeltaTracker::GetDeltaMemStoreInfo(size_t* 
size_bytes, MonoTime* creation_t
 }
 
 size_t DeltaTracker::DeltaMemStoreSize() const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
   return dms_exists_ ? dms_->EstimateSize() : 0;
 }
 
 int64_t DeltaTracker::MinUnflushedLogIndex() const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
   return dms_exists_ ? dms_->MinLogIndex() : 0;
 }
 
 size_t DeltaTracker::CountUndoDeltaStores() const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
   return undo_delta_stores_.size();
 }
 
 size_t DeltaTracker::CountRedoDeltaStores() const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
   return redo_delta_stores_.size();
 }
 
 uint64_t DeltaTracker::UndoDeltaOnDiskSize() const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
   uint64_t size = 0;
   for (const auto& ds : undo_delta_stores_) {
     size += ds->EstimateSize();
@@ -945,7 +945,7 @@ uint64_t DeltaTracker::UndoDeltaOnDiskSize() const {
 }
 
 uint64_t DeltaTracker::RedoDeltaOnDiskSize() const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
   uint64_t size = 0;
   for (const auto& ds : redo_delta_stores_) {
     size += ds->EstimateSize();
@@ -954,7 +954,7 @@ uint64_t DeltaTracker::RedoDeltaOnDiskSize() const {
 }
 
 void DeltaTracker::GetColumnIdsToCompact(std::vector<ColumnId>* col_ids) const 
{
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
 
   set<ColumnId> column_ids_to_compact;
   uint64_t all_delete_op_delta_store_cnt = 0;
@@ -992,7 +992,7 @@ void 
DeltaTracker::GetColumnIdsToCompact(std::vector<ColumnId>* col_ids) const {
 bool DeltaTracker::DeltaStoreNeedToBeCompacted() const {
   uint64_t all_delete_op_delta_store_cnt = 0;
   {
-    shared_lock<rw_spinlock> lock(component_lock_);
+    shared_lock lock(component_lock_);
 
     for (const auto& ds: redo_delta_stores_) {
       if (!ds->has_delta_stats()) {
@@ -1017,7 +1017,7 @@ bool DeltaTracker::DeltaStoreNeedToBeCompacted() const {
 }
 
 Status DeltaTracker::InitAllDeltaStoresForTests(WhichStores stores) {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
   if (stores == UNDOS_AND_REDOS || stores == UNDOS_ONLY) {
     for (const shared_ptr<DeltaStore>& ds : undo_delta_stores_) {
       RETURN_NOT_OK(ds->Init(nullptr));
@@ -1032,13 +1032,13 @@ Status 
DeltaTracker::InitAllDeltaStoresForTests(WhichStores stores) {
 }
 
 int64_t DeltaTracker::CountDeletedRows() const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
   DCHECK_GE(deleted_row_count_, 0);
   return deleted_row_count_ + (dms_exists_ ? dms_->deleted_row_count() : 0);
 }
 
 int64_t DeltaTracker::CountDeletedRowsInRedos() const {
-  shared_lock<rw_spinlock> lock(component_lock_);
+  shared_lock lock(component_lock_);
 
   int64_t delete_count = 0;
   for (const shared_ptr<DeltaStore>& ds : redo_delta_stores_) {
diff --git a/src/kudu/tablet/diskrowset.cc b/src/kudu/tablet/diskrowset.cc
index 1c01ded1a..143c0b89b 100644
--- a/src/kudu/tablet/diskrowset.cc
+++ b/src/kudu/tablet/diskrowset.cc
@@ -798,7 +798,7 @@ uint64_t DiskRowSet::OnDiskBaseDataSize() const {
 
 uint64_t DiskRowSet::OnDiskBaseDataColumnSize(const ColumnId& col_id) const {
   DCHECK(open_);
-  shared_lock<rw_spinlock> l(component_lock_);
+  shared_lock l(component_lock_);
   if (base_data_->has_data_for_column_id(col_id)) {
     return base_data_->OnDiskColumnDataSize(col_id);
   }
diff --git a/src/kudu/tablet/ops/write_op.cc b/src/kudu/tablet/ops/write_op.cc
index f16cb7d9d..dba3108c1 100644
--- a/src/kudu/tablet/ops/write_op.cc
+++ b/src/kudu/tablet/ops/write_op.cc
@@ -430,7 +430,7 @@ void WriteOpState::set_txn_rowsets(const 
scoped_refptr<TxnRowSets>& rowsets) {
 
 void WriteOpState::AcquireSchemaLock(rw_semaphore* schema_lock) {
   TRACE("Acquiring schema lock in shared mode");
-  shared_lock<rw_semaphore> temp(*schema_lock);
+  shared_lock temp(*schema_lock);
   schema_lock_.swap(temp);
   TRACE("Acquired schema lock");
 }
diff --git a/src/kudu/tablet/tablet.cc b/src/kudu/tablet/tablet.cc
index 389114723..bb96b1af7 100644
--- a/src/kudu/tablet/tablet.cc
+++ b/src/kudu/tablet/tablet.cc
@@ -596,7 +596,7 @@ void Tablet::SplitKeyRange(const EncodedKey* start_key,
                            std::vector<KeyRange>* key_range_info) {
   shared_ptr<RowSetTree> rowsets_copy;
   {
-    shared_lock<rw_spinlock> l(component_lock_);
+    shared_lock l(component_lock_);
     rowsets_copy = components_->rowsets;
   }
 
@@ -1099,7 +1099,7 @@ Status Tablet::MutateRowUnlocked(const IOContext* 
io_context,
 }
 
 void Tablet::StartApplying(WriteOpState* op_state) {
-  shared_lock<rw_spinlock> l(component_lock_);
+  shared_lock l(component_lock_);
 
   const auto txn_id = op_state->txn_id();
   if (txn_id) {
@@ -1786,7 +1786,7 @@ void Tablet::SetFlushCompactCommonHooksForTests(
 }
 
 int32_t Tablet::CurrentMrsIdForTests() const {
-  shared_lock<rw_spinlock> l(component_lock_);
+  shared_lock l(component_lock_);
   return components_->memrowset->mrs_id();
 }
 
@@ -1805,7 +1805,7 @@ Status 
Tablet::PickRowSetsToCompact(RowSetsInCompactionOrFlush *picked,
   // in tablet.h for details on why that would be bad.
   shared_ptr<RowSetTree> rowsets_copy;
   {
-    shared_lock<rw_spinlock> l(component_lock_);
+    shared_lock l(component_lock_);
     rowsets_copy = components_->rowsets;
   }
 
@@ -1831,7 +1831,7 @@ Status 
Tablet::PickRowSetsToCompact(RowSetsInCompactionOrFlush *picked,
     VLOG_WITH_PREFIX(2) << "Compaction quality: " << quality;
   }
 
-  shared_lock<rw_spinlock> l(component_lock_);
+  shared_lock l(component_lock_);
   for (const shared_ptr<RowSet>& rs : components_->rowsets->all_rowsets()) {
     if (picked_set.erase(rs.get()) == 0) {
       // Not picked.
@@ -1898,7 +1898,7 @@ bool Tablet::compaction_enabled() const {
 void Tablet::GetRowSetsForTests(RowSetVector* out) {
   shared_ptr<RowSetTree> rowsets_copy;
   {
-    shared_lock<rw_spinlock> l(component_lock_);
+    shared_lock l(component_lock_);
     rowsets_copy = components_->rowsets;
   }
   for (const shared_ptr<RowSet>& rs : rowsets_copy->all_rowsets()) {
@@ -2375,7 +2375,7 @@ void Tablet::UpdateCompactionStats(MaintenanceOpStats* 
stats) {
 
   shared_ptr<RowSetTree> rowsets_copy;
   {
-    shared_lock<rw_spinlock> l(component_lock_);
+    shared_lock l(component_lock_);
     rowsets_copy = components_->rowsets;
   }
 
@@ -2450,7 +2450,7 @@ void Tablet::UpdateCompactionStats(MaintenanceOpStats* 
stats) {
 
 
 Status Tablet::DebugDump(vector<string> *lines) {
-  shared_lock<rw_spinlock> l(component_lock_);
+  shared_lock l(component_lock_);
 
   LOG_STRING(INFO, lines) << "Dumping tablet:";
   LOG_STRING(INFO, lines) << "---------------------------";
@@ -2471,7 +2471,7 @@ Status Tablet::CaptureConsistentIterators(
     const ScanSpec* spec,
     vector<IterWithBounds>* iters) const {
 
-  shared_lock<rw_spinlock> l(component_lock_);
+  shared_lock l(component_lock_);
   RETURN_IF_STOPPED_OR_CHECK_STATE(kOpen);
 
   // Construct all the iterators locally first, so that if we fail
@@ -2646,7 +2646,7 @@ size_t Tablet::OnDiskDataSize() const {
 }
 
 uint64_t Tablet::LastReadElapsedSeconds() const {
-  shared_lock<rw_spinlock> l(last_rw_time_lock_);
+  shared_lock l(last_rw_time_lock_);
   DCHECK(last_read_time_.Initialized());
   return static_cast<uint64_t>((MonoTime::Now() - 
last_read_time_).ToSeconds());
 }
@@ -2657,7 +2657,7 @@ void Tablet::UpdateLastReadTime() {
 }
 
 uint64_t Tablet::LastWriteElapsedSeconds() const {
-  shared_lock<rw_spinlock> l(last_rw_time_lock_);
+  shared_lock l(last_rw_time_lock_);
   DCHECK(last_write_time_.Initialized());
   return static_cast<uint64_t>((MonoTime::Now() - 
last_write_time_).ToSeconds());
 }
@@ -3169,7 +3169,7 @@ int64_t Tablet::CountRedoDeltasForTests() const {
 }
 
 size_t Tablet::num_rowsets() const {
-  shared_lock<rw_spinlock> l(component_lock_);
+  shared_lock l(component_lock_);
   return components_ ? components_->rowsets->all_rowsets().size() : 0;
 }
 
@@ -3179,7 +3179,7 @@ void Tablet::PrintRSLayout(ostream* o) {
 
   shared_ptr<RowSetTree> rowsets_copy;
   {
-    shared_lock<rw_spinlock> l(component_lock_);
+    shared_lock l(component_lock_);
     rowsets_copy = components_->rowsets;
   }
   std::lock_guard compact_lock(compact_select_lock_);
diff --git a/src/kudu/tablet/tablet.h b/src/kudu/tablet/tablet.h
index 5bb60decd..377d953ba 100644
--- a/src/kudu/tablet/tablet.h
+++ b/src/kudu/tablet/tablet.h
@@ -24,9 +24,8 @@
 #include <memory>
 #include <mutex>
 #include <ostream>
-#include <shared_mutex>
+#include <shared_mutex> // IWYU pragma: keep
 #include <string>
-#include <type_traits>
 #include <unordered_map>
 #include <unordered_set>
 #include <utility>
@@ -781,12 +780,12 @@ class Tablet {
                                  const RowSetVector &to_add);
 
   void GetComponents(scoped_refptr<TabletComponents>* comps) const {
-    std::shared_lock<rw_spinlock> l(component_lock_);
+    std::shared_lock l(component_lock_);
     *comps = CHECK_NOTNULL(components_.get());
   }
 
   void GetComponentsOrNull(scoped_refptr<TabletComponents>* comps) const {
-    std::shared_lock<rw_spinlock> l(component_lock_);
+    std::shared_lock l(component_lock_);
     *comps = components_;
   }
 
diff --git a/src/kudu/tablet/txn_participant.cc 
b/src/kudu/tablet/txn_participant.cc
index d856ee7f1..cc89ff577 100644
--- a/src/kudu/tablet/txn_participant.cc
+++ b/src/kudu/tablet/txn_participant.cc
@@ -71,7 +71,7 @@ void Txn::AcquireWriteLock(std::unique_lock<rw_semaphore>* 
txn_lock) {
 }
 
 void Txn::AcquireReadLock(shared_lock<rw_semaphore>* txn_lock) {
-  shared_lock<rw_semaphore> l(state_lock_);
+  shared_lock l(state_lock_);
   *txn_lock = std::move(l);
 }
 
diff --git a/src/kudu/tools/rebalancer_tool.cc 
b/src/kudu/tools/rebalancer_tool.cc
index 2cd098710..5c95e19ba 100644
--- a/src/kudu/tools/rebalancer_tool.cc
+++ b/src/kudu/tools/rebalancer_tool.cc
@@ -151,7 +151,7 @@ Status RebalancerTool::PrintStats(ostream& out) {
   sort(locations.begin(), locations.end());
 
   for (const auto& location : locations) {
-    shared_lock<decltype(ksck_lock_)> guard(ksck_lock_);
+    shared_lock guard(ksck_lock_);
     ClusterRawInfo raw_info;
     RETURN_NOT_OK(KsckResultsToClusterRawInfo(location, ksck_->results(), 
&raw_info));
     ClusterInfo ci;
@@ -176,7 +176,7 @@ Status RebalancerTool::Run(RunStatus* result_status, 
size_t* moves_count) {
 
   ClusterRawInfo raw_info;
   {
-    shared_lock<decltype(ksck_lock_)> guard(ksck_lock_);
+    shared_lock guard(ksck_lock_);
     RETURN_NOT_OK(KsckResultsToClusterRawInfo(
         nullopt, ksck_->results(), &raw_info));
   }
@@ -817,7 +817,7 @@ Status RebalancerTool::RunWith(Runner* runner, RunStatus* 
result_status) {
 Status RebalancerTool::GetClusterRawInfo(const optional<string>& location,
                                          ClusterRawInfo* raw_info) {
   RETURN_NOT_OK(RefreshKsckResults());
-  shared_lock<decltype(ksck_lock_)> guard(ksck_lock_);
+  shared_lock guard(ksck_lock_);
   return KsckResultsToClusterRawInfo(location, ksck_->results(), raw_info);
 }
 
diff --git a/src/kudu/tserver/scanners.cc b/src/kudu/tserver/scanners.cc
index cc5d35858..afe158513 100644
--- a/src/kudu/tserver/scanners.cc
+++ b/src/kudu/tserver/scanners.cc
@@ -228,7 +228,7 @@ Status ScannerManager::LookupScanner(const string& 
scanner_id,
                                      SharedScanner* scanner) {
   SharedScanner ret;
   ScannerMapStripe& stripe = GetStripeByScannerId(scanner_id);
-  shared_lock<RWMutex> l(stripe.lock_);
+  shared_lock l(stripe.lock_);
   bool found_scanner = FindCopy(stripe.scanners_by_id_, scanner_id, &ret);
   if (!found_scanner) {
     *error_code = TabletServerErrorPB::SCANNER_EXPIRED;
@@ -284,7 +284,7 @@ bool ScannerManager::UnregisterScanner(const string& scanner_id) {
 size_t ScannerManager::CountActiveScanners() const {
   size_t total = 0;
   for (const ScannerMapStripe* e : scanner_maps_) {
-    shared_lock<RWMutex> l(e->lock_);
+    shared_lock l(e->lock_);
     total += e->scanners_by_id_.size();
   }
   return total;
@@ -295,7 +295,7 @@ size_t ScannerManager::CountSlowScans() const {
   const MonoTime now = MonoTime::Now();
   const MonoDelta slow_threshold = MonoDelta::FromMilliseconds(FLAGS_slow_scanner_threshold_ms);
   for (const auto* stripe : scanner_maps_) {
-    shared_lock<RWMutex> l(stripe->lock_);
+    shared_lock l(stripe->lock_);
     for (const auto& it : stripe->scanners_by_id_) {
       const SharedScanner& scanner = it.second;
       const MonoTime start_time = scanner->start_time();
@@ -311,7 +311,7 @@ size_t ScannerManager::CountSlowScans() const {
 
 void ScannerManager::ListScanners(std::vector<SharedScanner>* scanners) const {
   for (const ScannerMapStripe* stripe : scanner_maps_) {
-    shared_lock<RWMutex> l(stripe->lock_);
+    shared_lock l(stripe->lock_);
     for (const auto& se : stripe->scanners_by_id_) {
       scanners->push_back(se.second);
     }
@@ -321,7 +321,7 @@ void ScannerManager::ListScanners(std::vector<SharedScanner>* scanners) const {
 vector<SharedScanDescriptor> ScannerManager::ListScans() const {
   unordered_map<string, SharedScanDescriptor> scans;
   for (const ScannerMapStripe* stripe : scanner_maps_) {
-    shared_lock<RWMutex> l(stripe->lock_);
+    shared_lock l(stripe->lock_);
     for (const auto& se : stripe->scanners_by_id_) {
       if (se.second->is_initted()) {
         SharedScanDescriptor desc = se.second->Descriptor();
@@ -332,7 +332,7 @@ vector<SharedScanDescriptor> ScannerManager::ListScans() const {
   }
 
   {
-    shared_lock<rw_spinlock> l(completed_scans_lock_.get_lock());
+    shared_lock l(completed_scans_lock_.get_lock());
     // A scanner in 'scans' may have completed between the above loop and here.
     // As we'd rather have the finalized descriptor of the completed scan,
     // update over the old descriptor in this case.
@@ -365,7 +365,7 @@ vector<SharedScanDescriptor> ScannerManager::ListSlowScans() const {
   // Get all the scans first.
   unordered_map<string, SharedScanDescriptor> scans;
   {
-    shared_lock<rw_spinlock> l(slow_scans_lock_.get_lock());
+    shared_lock l(slow_scans_lock_.get_lock());
     for (const auto& scan : slow_scans_) {
       InsertOrUpdate(&scans, scan->scanner_id, scan);
     }
@@ -622,7 +622,7 @@ SharedScanDescriptor Scanner::Descriptor() const {
 }
 
 CpuTimes Scanner::cpu_times() const {
-  shared_lock<RWMutex> l(cpu_times_lock_);
+  shared_lock l(cpu_times_lock_);
   return cpu_times_;
 }
 
diff --git a/src/kudu/tserver/ts_tablet_manager.cc b/src/kudu/tserver/ts_tablet_manager.cc
index 1bc5fe1c0..30715d70e 100644
--- a/src/kudu/tserver/ts_tablet_manager.cc
+++ b/src/kudu/tserver/ts_tablet_manager.cc
@@ -627,7 +627,7 @@ Status TSTabletManager::WaitForAllBootstrapsToFinish() {
 
   open_tablet_pool_->Wait();
 
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   for (const TabletMap::value_type& entry : tablet_map_) {
     if (entry.second->state() == tablet::FAILED) {
       return entry.second->error();
@@ -773,7 +773,7 @@ void TSTabletManager::StartTabletCopy(
   optional<string> transition;
   {
     // Lock must be dropped before executing callbacks.
-    shared_lock<RWMutex> lock(lock_);
+    shared_lock lock(lock_);
     auto* t = FindOrNull(transition_in_progress_, tablet_id);
     if (t) {
       transition = *t;
@@ -1590,7 +1590,7 @@ void TSTabletManager::RegisterTablet(const string& tablet_id,
 
 bool TSTabletManager::LookupTablet(const string& tablet_id,
                                    scoped_refptr<TabletReplica>* replica) 
const {
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   return LookupTabletUnlocked(tablet_id, replica);
 }
 
@@ -1618,7 +1618,7 @@ const NodeInstancePB& TSTabletManager::NodeInstance() const {
 
 void TSTabletManager::GetTabletReplicasImpl(
     vector<scoped_refptr<TabletReplica>>* replicas) const {
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   AppendValuesFromMap(tablet_map_, replicas);
 }
 
@@ -1640,7 +1640,7 @@ void TSTabletManager::MarkTabletsDirty(const vector<string>& tablet_ids, const s
 
 int TSTabletManager::GetNumLiveTablets() const {
   int count = 0;
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   for (const auto& entry : tablet_map_) {
     tablet::TabletStatePB state = entry.second->state();
     if (state == tablet::BOOTSTRAPPING ||
@@ -1653,7 +1653,7 @@ int TSTabletManager::GetNumLiveTablets() const {
 
 TabletNumByDimensionMap TSTabletManager::GetNumLiveTabletsByDimension() const {
   TabletNumByDimensionMap result;
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   for (const auto& entry : tablet_map_) {
     tablet::TabletStatePB state = entry.second->state();
     if (state == tablet::BOOTSTRAPPING ||
@@ -1669,7 +1669,7 @@ TabletNumByDimensionMap TSTabletManager::GetNumLiveTabletsByDimension() const {
 
 TabletNumByRangePerTableMap TSTabletManager::GetNumLiveTabletsByRangePerTable() const {
   TabletNumByRangePerTableMap result;
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   for (const auto& entry : tablet_map_) {
     tablet::TabletStatePB state = entry.second->state();
     if (state == tablet::BOOTSTRAPPING ||
@@ -1712,7 +1712,7 @@ void TSTabletManager::TxnStalenessTrackerTask() {
 
     vector<scoped_refptr<TabletReplica>> replicas;
     {
-      shared_lock<RWMutex> l(lock_);
+      shared_lock l(lock_);
       for (const auto& elem : tablet_map_) {
         auto r = elem.second;
         // Find the running txn status tablet replicas.
@@ -1810,7 +1810,7 @@ void TSTabletManager::PopulateIncrementalTabletReport(TabletReportPB* report,
   vector<scoped_refptr<tablet::TabletReplica>> to_report;
   to_report.reserve(tablet_ids.size());
   {
-    shared_lock<RWMutex> shared_lock(lock_);
+    shared_lock shared_lock(lock_);
     for (const auto& id : tablet_ids) {
       const scoped_refptr<tablet::TabletReplica>* replica =
           FindOrNull(tablet_map_, id);
@@ -2035,7 +2035,7 @@ Status TSTabletManager::WaitForNoTransitionsForTests(const MonoDelta& timeout) c
   const MonoTime start = MonoTime::Now();
   while (MonoTime::Now() - start < timeout) {
     {
-      shared_lock<RWMutex> lock(lock_);
+      shared_lock lock(lock_);
       if (transition_in_progress_.empty()) {
         return Status::OK();
       }
diff --git a/src/kudu/tserver/ts_tablet_manager.h b/src/kudu/tserver/ts_tablet_manager.h
index 144afbe95..289ed0108 100644
--- a/src/kudu/tserver/ts_tablet_manager.h
+++ b/src/kudu/tserver/ts_tablet_manager.h
@@ -23,11 +23,10 @@
 #include <map>
 #include <memory>
 #include <optional>
-#include <shared_mutex>
+#include <shared_mutex> // IWYU pragma: keep
 #include <string>
 #include <unordered_map>
 #include <unordered_set>
-#include <type_traits>
 #include <vector>
 
 #include <gtest/gtest_prod.h>
@@ -388,7 +387,7 @@ class TSTabletManager : public tserver::TabletReplicaLookupIf {
                                  int64_t last_logged_term);
 
   TSTabletManagerStatePB state() const {
-    std::shared_lock<RWMutex> l(lock_);
+    std::shared_lock l(lock_);
     return state_;
   }
 
diff --git a/src/kudu/util/locks.h b/src/kudu/util/locks.h
index 48d6a73af..1758a2bda 100644
--- a/src/kudu/util/locks.h
+++ b/src/kudu/util/locks.h
@@ -147,17 +147,17 @@ class rw_spinlock {
 //
 //   // Lock shared:
 //   {
-//     std::shared_lock<rw_spinlock> lock(mylock.get_lock());
+//     std::shared_lock lock(mylock.get_lock());
 //     ...
 //   }
 //
 //   // Lock exclusive:
 //
 //   {
-//     std::lock_guard<percpu_rwlock> lock(mylock);
+//     std::lock_guard lock(mylock);
 //     ...
 //   }
-class percpu_rwlock {
+class percpu_rwlock { // NOLINT(readability-identifier-naming)
  public:
   percpu_rwlock() {
 #if defined(__APPLE__) || defined(THREAD_SANITIZER)
diff --git a/src/kudu/util/rw_mutex-test.cc b/src/kudu/util/rw_mutex-test.cc
index 5f194f7e5..24b2592be 100644
--- a/src/kudu/util/rw_mutex-test.cc
+++ b/src/kudu/util/rw_mutex-test.cc
@@ -15,12 +15,13 @@
 // specific language governing permissions and limitations
 // under the License.
 
+#include "kudu/util/rw_mutex.h"
+
 #include <cstdint>
 #include <mutex>
 #include <ostream>
 #include <shared_mutex>
 #include <thread>
-#include <type_traits>
 #include <vector>
 
 #include <glog/logging.h>
@@ -28,7 +29,6 @@
 
 #include "kudu/util/atomic.h"
 #include "kudu/util/monotime.h"
-#include "kudu/util/rw_mutex.h"
 #include "kudu/util/test_util.h"
 
 using std::lock_guard;
@@ -85,13 +85,13 @@ TEST_P(RWMutexTest, TestDeadlocks) {
   for (int i = 0; i < 2; i++) {
     threads.emplace_back([&](){
       while (!done.Load()) {
-        shared_lock<RWMutex> l(lock_);
+        shared_lock l(lock_);
         number_of_reads.Increment();
       }
     });
     threads.emplace_back([&](){
       while (!done.Load()) {
-        shared_lock<RWMutex> l(lock_, try_to_lock);
+        shared_lock l(lock_, try_to_lock);
         if (l.owns_lock()) {
           number_of_reads.Increment();
         }
@@ -105,7 +105,7 @@ TEST_P(RWMutexTest, TestDeadlocks) {
     t.join();
   }
 
-  shared_lock<RWMutex> l(lock_);
+  shared_lock l(lock_);
   LOG(INFO) << "Number of writes: " << number_of_writes;
   LOG(INFO) << "Number of reads: " << number_of_reads.Load();
 }
diff --git a/src/kudu/util/rw_semaphore-test.cc b/src/kudu/util/rw_semaphore-test.cc
index 814ff586f..312b74dfe 100644
--- a/src/kudu/util/rw_semaphore-test.cc
+++ b/src/kudu/util/rw_semaphore-test.cc
@@ -20,7 +20,6 @@
 #include <cstdint>
 #include <shared_mutex>
 #include <thread>
-#include <type_traits>
 #include <vector>
 
 #include <glog/logging.h>
@@ -57,7 +56,7 @@ void Writer(SharedState* state) {
 void Reader(SharedState* state) {
   int prev_val = 0;
   while (true) {
-    shared_lock<rw_semaphore> l(state->sem);
+    shared_lock l(state->sem);
     // The int var should only be seen to increase.
     CHECK_GE(state->int_var, prev_val);
     prev_val = state->int_var;
diff --git a/src/kudu/util/thread.cc b/src/kudu/util/thread.cc
index 0293ef1a4..b149447cd 100644
--- a/src/kudu/util/thread.cc
+++ b/src/kudu/util/thread.cc
@@ -427,7 +427,7 @@ void ThreadMgr::ThreadPathHandler(const WebCallbackRegistry::WebRequest& req,
     // imposed on new threads that acquire the lock in write mode.
     vector<ThreadDescriptor> descriptors_to_print;
     if (!requested_all) {
-      shared_lock<decltype(lock_)> l(lock_);
+      shared_lock l(lock_);
       const auto* category = FindOrNull(thread_categories_, *category_name);
       if (!category) {
         return;
@@ -436,7 +436,7 @@ void ThreadMgr::ThreadPathHandler(const WebCallbackRegistry::WebRequest& req,
         descriptors_to_print.emplace_back(elem.second);
       }
     } else {
-      shared_lock<decltype(lock_)> l(lock_);
+      shared_lock l(lock_);
       for (const auto& category : thread_categories_) {
         for (const auto& elem : category.second) {
           descriptors_to_print.emplace_back(elem.second);
@@ -454,7 +454,7 @@ void ThreadMgr::ThreadPathHandler(const WebCallbackRegistry::WebRequest& req,
     vector<pair<string, uint64_t>> thread_categories_info;
     {
       // See comment above regarding short critical sections.
-      shared_lock<decltype(lock_)> l(lock_);
+      shared_lock l(lock_);
       thread_categories_info.reserve(thread_categories_.size());
       for (const auto& category : thread_categories_) {
         thread_categories_info.emplace_back(category.first, category.second.size());

Reply via email to