This is an automated email from the ASF dual-hosted git repository.

apitrou pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/main by this push:
     new 1b5e26dc2f GH-38971: [C++] Fix spelling (filesystem) (#38972)
1b5e26dc2f is described below

commit 1b5e26dc2fcb1620c047ad756980733f50456079
Author: Josh Soref <[email protected]>
AuthorDate: Tue Dec 5 10:36:33 2023 -0500

    GH-38971: [C++] Fix spelling (filesystem) (#38972)
    
    
    
    ### Rationale for this change
    
    ### What changes are included in this PR?
    
    Spelling fixes to cpp/src/arrow/filesystem/
    
    ### Are these changes tested?
    
    ### Are there any user-facing changes?
    
    * Closes: #38971
    
    Authored-by: Josh Soref <[email protected]>
    Signed-off-by: Antoine Pitrou <[email protected]>
---
 cpp/src/arrow/filesystem/azurefs.cc      |  4 ++--
 cpp/src/arrow/filesystem/azurefs_test.cc |  4 ++--
 cpp/src/arrow/filesystem/localfs.cc      |  2 +-
 cpp/src/arrow/filesystem/s3fs.cc         |  6 +++---
 cpp/src/arrow/filesystem/s3fs.h          |  4 ++--
 cpp/src/arrow/filesystem/s3fs_test.cc    | 10 +++++-----
 cpp/src/arrow/filesystem/test_util.h     |  2 +-
 7 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/cpp/src/arrow/filesystem/azurefs.cc b/cpp/src/arrow/filesystem/azurefs.cc
index ecc8d06f97..4efb802f8b 100644
--- a/cpp/src/arrow/filesystem/azurefs.cc
+++ b/cpp/src/arrow/filesystem/azurefs.cc
@@ -335,7 +335,7 @@ class ObjectInputFile final : public io::RandomAccessFile {
       }
       return internal::ExceptionToStatus(
           "GetProperties failed for '" + blob_client_->GetUrl() +
-              "' with an unexpected Azure error. Can not initialise an ObjectInputFile "
+              "' with an unexpected Azure error. Cannot initialise an ObjectInputFile "
               "without knowing the file size.",
           exception);
     }
@@ -561,7 +561,7 @@ class ObjectAppendStream final : public io::OutputStream {
         } else {
           return internal::ExceptionToStatus(
               "GetProperties failed for '" + block_blob_client_->GetUrl() +
-                  "' with an unexpected Azure error. Can not initialise an "
+                  "' with an unexpected Azure error. Cannot initialise an "
                   "ObjectAppendStream without knowing whether a file already exists at "
                   "this path, and if it exists, its size.",
               exception);
diff --git a/cpp/src/arrow/filesystem/azurefs_test.cc b/cpp/src/arrow/filesystem/azurefs_test.cc
index b6f7598769..1828c052e7 100644
--- a/cpp/src/arrow/filesystem/azurefs_test.cc
+++ b/cpp/src/arrow/filesystem/azurefs_test.cc
@@ -420,7 +420,7 @@ TEST_F(AzuriteFileSystemTest, DetectHierarchicalNamespace) {
 TEST_F(AzuriteFileSystemTest, DetectHierarchicalNamespaceFailsWithMissingContainer) {
   auto hierarchical_namespace = internal::HierarchicalNamespaceDetector();
   ASSERT_OK(hierarchical_namespace.Init(datalake_service_client_.get()));
-  ASSERT_NOT_OK(hierarchical_namespace.Enabled("non-existent-container"));
+  ASSERT_NOT_OK(hierarchical_namespace.Enabled("nonexistent-container"));
 }
 
 TEST_F(AzuriteFileSystemTest, GetFileInfoAccount) {
@@ -433,7 +433,7 @@ TEST_F(AzuriteFileSystemTest, GetFileInfoAccount) {
 TEST_F(AzuriteFileSystemTest, GetFileInfoContainer) {
   AssertFileInfo(fs_.get(), PreexistingContainerName(), FileType::Directory);
 
-  AssertFileInfo(fs_.get(), "non-existent-container", FileType::NotFound);
+  AssertFileInfo(fs_.get(), "nonexistent-container", FileType::NotFound);
 
   // URI
   ASSERT_RAISES(Invalid, fs_->GetFileInfo("abfs://" + PreexistingContainerName()));
diff --git a/cpp/src/arrow/filesystem/localfs.cc b/cpp/src/arrow/filesystem/localfs.cc
index e030014159..d440629a02 100644
--- a/cpp/src/arrow/filesystem/localfs.cc
+++ b/cpp/src/arrow/filesystem/localfs.cc
@@ -304,7 +304,7 @@ namespace {
 /// Workhorse for streaming async implementation of `GetFileInfo`
 /// (`GetFileInfoGenerator`).
 ///
-/// There are two variants of async discovery functions suported:
+/// There are two variants of async discovery functions supported:
 /// 1. `DiscoverDirectoryFiles`, which parallelizes traversal of individual directories
 ///    so that each directory results are yielded as a separate `FileInfoGenerator` via
 ///    an underlying `DiscoveryImplIterator`, which delivers items in chunks (default size
diff --git a/cpp/src/arrow/filesystem/s3fs.cc b/cpp/src/arrow/filesystem/s3fs.cc
index 26a1530660..2cc907a63c 100644
--- a/cpp/src/arrow/filesystem/s3fs.cc
+++ b/cpp/src/arrow/filesystem/s3fs.cc
@@ -1042,7 +1042,7 @@ class RegionResolver {
     lock.unlock();
     ARROW_ASSIGN_OR_RAISE(auto region, ResolveRegionUncached(bucket));
     lock.lock();
-    // Note we don't cache a non-existent bucket, as the bucket could be created later
+    // Note we don't cache a nonexistent bucket, as the bucket could be created later
     cache_[bucket] = region;
     return region;
   }
@@ -1546,7 +1546,7 @@ class ObjectOutputStream final : public io::OutputStream {
       nbytes -= offset;
     };
 
-    // Handle case where we have some bytes bufferred from prior calls.
+    // Handle case where we have some bytes buffered from prior calls.
     if (current_part_size_ > 0) {
       // Try to fill current buffer
       const int64_t to_copy = std::min(nbytes, kPartUploadSize - current_part_size_);
@@ -3007,7 +3007,7 @@ S3GlobalOptions S3GlobalOptions::Defaults() {
   auto result = arrow::internal::GetEnvVar("ARROW_S3_LOG_LEVEL");
 
   if (result.ok()) {
-    // Extract, trim, and downcase the value of the enivronment variable
+    // Extract, trim, and downcase the value of the environment variable
     auto value =
         arrow::internal::AsciiToLower(arrow::internal::TrimString(result.ValueUnsafe()));
 
diff --git a/cpp/src/arrow/filesystem/s3fs.h b/cpp/src/arrow/filesystem/s3fs.h
index 9900a9a1c0..90333e05e7 100644
--- a/cpp/src/arrow/filesystem/s3fs.h
+++ b/cpp/src/arrow/filesystem/s3fs.h
@@ -130,7 +130,7 @@ struct ARROW_EXPORT S3Options {
   std::string role_arn;
   /// Optional identifier for an assumed role session.
   std::string session_name;
-  /// Optional external idenitifer to pass to STS when assuming a role
+  /// Optional external identifier to pass to STS when assuming a role
   std::string external_id;
   /// Frequency (in seconds) to refresh temporary credentials from assumed role
   int load_frequency = 900;
@@ -185,7 +185,7 @@ struct ARROW_EXPORT S3Options {
       const std::string& external_id = "", int load_frequency = 900,
       const std::shared_ptr<Aws::STS::STSClient>& stsClient = NULLPTR);
 
-  /// Configure with credentials from role assumed using a web identitiy token
+  /// Configure with credentials from role assumed using a web identity token
   void ConfigureAssumeRoleWithWebIdentityCredentials();
 
   std::string GetAccessKey() const;
diff --git a/cpp/src/arrow/filesystem/s3fs_test.cc b/cpp/src/arrow/filesystem/s3fs_test.cc
index b789845bd1..487a6abb18 100644
--- a/cpp/src/arrow/filesystem/s3fs_test.cc
+++ b/cpp/src/arrow/filesystem/s3fs_test.cc
@@ -365,10 +365,10 @@ TEST_F(S3RegionResolutionTest, RestrictedBucket) {
 }
 
 TEST_F(S3RegionResolutionTest, NonExistentBucket) {
-  auto maybe_region = ResolveS3BucketRegion("ursa-labs-non-existent-bucket");
+  auto maybe_region = ResolveS3BucketRegion("ursa-labs-nonexistent-bucket");
   ASSERT_RAISES(IOError, maybe_region);
   ASSERT_THAT(maybe_region.status().message(),
-              ::testing::HasSubstr("Bucket 'ursa-labs-non-existent-bucket' not found"));
+              ::testing::HasSubstr("Bucket 'ursa-labs-nonexistent-bucket' not found"));
 }
 
 TEST_F(S3RegionResolutionTest, InvalidBucketName) {
@@ -645,13 +645,13 @@ TEST_F(TestS3FS, GetFileInfoObject) {
   // Nonexistent
   AssertFileInfo(fs_.get(), "bucket/emptyd", FileType::NotFound);
   AssertFileInfo(fs_.get(), "bucket/somed", FileType::NotFound);
-  AssertFileInfo(fs_.get(), "non-existent-bucket/somed", FileType::NotFound);
+  AssertFileInfo(fs_.get(), "nonexistent-bucket/somed", FileType::NotFound);
 
   // Trailing slashes
   AssertFileInfo(fs_.get(), "bucket/emptydir/", FileType::Directory, kNoSize);
   AssertFileInfo(fs_.get(), "bucket/somefile/", FileType::File, 9);
   AssertFileInfo(fs_.get(), "bucket/emptyd/", FileType::NotFound);
-  AssertFileInfo(fs_.get(), "non-existent-bucket/somed/", FileType::NotFound);
+  AssertFileInfo(fs_.get(), "nonexistent-bucket/somed/", FileType::NotFound);
 
   // URIs
   ASSERT_RAISES(Invalid, fs_->GetFileInfo("s3:bucket/emptydir"));
@@ -1057,7 +1057,7 @@ TEST_F(TestS3FS, Move) {
   ASSERT_OK(fs_->Move("bucket/a=2/newfile", "bucket/a=3/newfile"));
 
   // Nonexistent
-  ASSERT_RAISES(IOError, fs_->Move("bucket/non-existent", "bucket/newfile2"));
+  ASSERT_RAISES(IOError, fs_->Move("bucket/nonexistent", "bucket/newfile2"));
   ASSERT_RAISES(IOError, fs_->Move("nonexistent-bucket/somefile", "bucket/newfile2"));
   ASSERT_RAISES(IOError, fs_->Move("bucket/somefile", "nonexistent-bucket/newfile2"));
   AssertFileInfo(fs_.get(), "bucket/newfile2", FileType::NotFound);
diff --git a/cpp/src/arrow/filesystem/test_util.h b/cpp/src/arrow/filesystem/test_util.h
index 8156721b85..c4d846fd31 100644
--- a/cpp/src/arrow/filesystem/test_util.h
+++ b/cpp/src/arrow/filesystem/test_util.h
@@ -170,7 +170,7 @@ class ARROW_TESTING_EXPORT GenericFileSystemTest {
   virtual bool allow_move_dir_over_non_empty_dir() const { return false; }
   // - Whether the filesystem allows appending to a file
   virtual bool allow_append_to_file() const { return true; }
-  // - Whether the filesystem allows appending to a new (not existent yet) file
+  // - Whether the filesystem allows appending to a nonexistent file
   virtual bool allow_append_to_new_file() const { return true; }
   // - Whether the filesystem supports directory modification times
   virtual bool have_directory_mtimes() const { return true; }

Reply via email to