This is an automated email from the ASF dual-hosted git repository.
shoothzj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/bookkeeper.git
The following commit(s) were added to refs/heads/master by this push:
new 2eb70b1f82 ci: add typo ci check and fix typos (#4375)
2eb70b1f82 is described below
commit 2eb70b1f8216b2c6621d0e57cd491a2067824316
Author: ZhangJian He <[email protected]>
AuthorDate: Sun May 26 11:23:04 2024 +0800
ci: add typo ci check and fix typos (#4375)
### Motivation
Introduce a typos CI check to avoid typos. See
https://lists.apache.org/thread/04hqqcnkfc5189zsxj0s5wm37t2x7bky
Signed-off-by: ZhangJian He <[email protected]>
---
.github/workflows/bk-ci.yml | 8 ++++
.typos.toml | 56 ++++++++++++++++++++++
.../org/apache/bookkeeper/common/util/Retries.java | 2 +-
.../org/apache/bookkeeper/bookie/BookieShell.java | 4 +-
.../EntryLogManagerForEntryLogPerLedger.java | 2 +-
.../bookkeeper/bookie/EntryLoggerAllocator.java | 4 +-
.../bookie/InterleavedLedgerStorage.java | 2 +-
.../bookkeeper/bookie/LedgerDirsManager.java | 10 ++--
.../bookkeeper/bookie/LedgerDirsMonitor.java | 10 ++--
.../bookkeeper/bookie/storage/ldb/ReadCache.java | 2 +-
.../org/apache/bookkeeper/client/BookKeeper.java | 2 +-
.../apache/bookkeeper/client/BookKeeperAdmin.java | 6 +--
.../bookkeeper/client/DistributionSchedule.java | 2 +-
.../bookkeeper/client/EnsemblePlacementPolicy.java | 2 +-
.../client/LedgerFragmentReplicator.java | 8 ++--
.../org/apache/bookkeeper/client/LedgerHandle.java | 4 +-
.../client/WeightedRandomSelectionImpl.java | 12 ++---
.../apache/bookkeeper/client/api/OpenBuilder.java | 2 +-
.../apache/bookkeeper/client/api/ReadHandle.java | 2 +-
.../bookkeeper/client/api/WriteAdvHandle.java | 2 +-
.../bookkeeper/conf/ClientConfiguration.java | 14 +++---
.../bookkeeper/conf/ServerConfiguration.java | 4 +-
.../bookkeeper/discover/ZKRegistrationClient.java | 2 +-
.../apache/bookkeeper/feature/SettableFeature.java | 8 ++--
.../meta/AbstractHierarchicalLedgerManager.java | 2 +-
.../bookkeeper/meta/AbstractZkLedgerManager.java | 2 +-
.../org/apache/bookkeeper/meta/LedgerManager.java | 8 ++--
.../meta/LedgerUnderreplicationManager.java | 2 +-
.../bookkeeper/meta/MSLedgerManagerFactory.java | 4 +-
.../meta/ZkLedgerUnderreplicationManager.java | 4 +-
.../apache/bookkeeper/metastore/MSException.java | 2 +-
.../org/apache/bookkeeper/metastore/MetaStore.java | 2 +-
.../java/org/apache/bookkeeper/net/NodeBase.java | 2 +-
.../bookkeeper/proto/BookieProtoEncoding.java | 2 +-
.../bookkeeper/proto/PerChannelBookieClient.java | 4 +-
.../bookkeeper/proto/ReadEntryProcessorV3.java | 2 +-
.../bookkeeper/proto/WriteEntryProcessorV3.java | 2 +-
.../bookkeeper/replication/ReplicationWorker.java | 2 +-
.../bookkeeper/streaming/LedgerOutputStream.java | 2 +-
.../apache/bookkeeper/tls/TLSContextFactory.java | 4 +-
.../tools/cli/commands/bookie/LedgerCommand.java | 4 +-
.../cli/commands/bookie/ReadLedgerCommand.java | 6 +--
...generateInterleavedStorageIndexFileCommand.java | 2 +-
.../cli/commands/client/LedgerMetaDataCommand.java | 2 +-
.../util/AvailabilityOfEntriesOfLedger.java | 6 +--
.../org/apache/bookkeeper/util/DiskChecker.java | 2 +-
.../org/apache/bookkeeper/util/StringUtils.java | 4 +-
.../java/org/apache/bookkeeper/util/ZkUtils.java | 2 +-
.../util/collections/SynchronizedHashMultiMap.java | 2 +-
.../bookkeeper/client/HandleFailuresTest.java | 4 +-
.../apache/bookkeeper/util/StaticDNSResolver.java | 2 +-
conf/bk_server.conf | 10 ++--
conf/zookeeper.conf | 2 +-
src/owasp-dependency-check-suppressions.xml | 2 +-
.../bookkeeper/stats/codahale/FastTimer.java | 2 +-
.../prometheus/DataSketchesOpStatsLogger.java | 6 +--
.../bookkeeper/api/kv/result/DeleteResult.java | 2 +-
.../bookkeeper/clients/StorageClientBuilder.java | 2 +-
.../clients/impl/kv/PByteBufTableImpl.java | 2 +-
.../apache/distributedlog/BKAsyncLogWriter.java | 4 +-
.../apache/distributedlog/BKLogReadHandler.java | 2 +-
.../apache/distributedlog/BKLogSegmentWriter.java | 2 +-
.../DistributedLogConfiguration.java | 10 ++--
.../org/apache/distributedlog/ZooKeeperClient.java | 2 +-
.../distributedlog/ZooKeeperClientBuilder.java | 16 +++----
.../distributedlog/api/namespace/Namespace.java | 2 +-
.../impl/acl/ZKAccessControlManager.java | 2 +-
.../federated/FederatedZKLogMetadataStore.java | 2 +-
.../distributedlog/lock/ZKDistributedLock.java | 2 +-
.../apache/distributedlog/lock/ZKSessionLock.java | 6 +--
.../logsegment/LogSegmentEntryReader.java | 2 +-
.../distributedlog/namespace/NamespaceWatcher.java | 2 +-
.../distributedlog/tools/DistributedLogTool.java | 4 +-
.../org/apache/distributedlog/zk/DefaultZKOp.java | 2 +-
.../apache/distributedlog/zk/ZKWatcherManager.java | 12 ++---
.../core/src/test/resources/bk_server.conf | 4 +-
.../java/org/apache/distributedlog/LogRecord.java | 2 +-
stream/proto/src/main/proto/stream.proto | 2 +-
.../server/service/RegistrationStateService.java | 2 +-
.../bookkeeper/statelib/impl/mvcc/MVCCRecord.java | 2 +-
.../impl/rocksdb/checkpoint/CheckpointInfo.java | 2 +-
.../storage/api/cluster/ClusterInitializer.java | 4 +-
.../impl/cluster/ZkClusterMetadataStore.java | 8 ++--
.../bookkeeper/tools/perf/table/PerfClient.java | 2 +-
84 files changed, 222 insertions(+), 158 deletions(-)
diff --git a/.github/workflows/bk-ci.yml b/.github/workflows/bk-ci.yml
index 0dc2c7c5b3..3a21186376 100644
--- a/.github/workflows/bk-ci.yml
+++ b/.github/workflows/bk-ci.yml
@@ -483,6 +483,14 @@ jobs:
if: cancelled()
run: ./dev/ci-tool print_thread_dumps
+ typo-check:
+ name: Typo Check
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Check typos
+ uses: crate-ci/typos@master
+
owasp-dependency-check:
name: OWASP Dependency Check
runs-on: ubuntu-latest
diff --git a/.typos.toml b/.typos.toml
new file mode 100644
index 0000000000..e8ef1d3a28
--- /dev/null
+++ b/.typos.toml
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+[default.extend-words]
+# abbr
+"ba" = "ba"
+"bve" = "bve"
+"cace" = "cace"
+"cann" = "cann"
+"dbe" = "dbe"
+"entrys" = "entrys"
+"fo" = "fo"
+"ine" = "ine"
+"isse" = "isse"
+"mor" = "mor"
+"nwe" = "nwe"
+"nd" = "nd"
+"nin" = "nin"
+"oce" = "oce"
+"ot" = "ot"
+"ser" = "ser"
+"shouldnot" = "shouldnot"
+"tio" = "tio"
+"ue" = "ue"
+# keep for comptability
+"deleteable" = "deleteable"
+"infinit" = "infinit"
+"explict" = "explict"
+"uninitalize" = "uninitalize"
+# keyword fp
+"guage" = "guage"
+"passin" = "passin"
+"testng" = "testng"
+"vertx" = "vertx"
+"verticle" = "verticle"
+
+[files]
+extend-exclude = [
+
"bookkeeper-server/src/test/java/org/apache/bookkeeper/meta/TestLedgerMetadataSerDe.java",
+]
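A note on the configuration above: in typos' config, entries under [default.extend-words] map a detected word to its intended correction, so an identity mapping such as "entrys" = "entrys" tells the checker to accept that spelling instead of flagging it. That is how the abbreviations, compatibility spellings and keyword false positives listed above are whitelisted, while [files] extend-exclude skips the listed test file from checking altogether.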
diff --git
a/bookkeeper-common/src/main/java/org/apache/bookkeeper/common/util/Retries.java
b/bookkeeper-common/src/main/java/org/apache/bookkeeper/common/util/Retries.java
index 6a52ef597c..dff0075ac6 100644
---
a/bookkeeper-common/src/main/java/org/apache/bookkeeper/common/util/Retries.java
+++
b/bookkeeper-common/src/main/java/org/apache/bookkeeper/common/util/Retries.java
@@ -75,7 +75,7 @@ public final class Retries {
* @param task a task to execute.
* @param scheduler scheduler to schedule the task and complete the
futures.
* @param key the submit key for the scheduler.
- * @param <ReturnT> the return tye.
+ * @param <ReturnT> the return type.
* @return future represents the result of the task with retries.
*/
public static <ReturnT> CompletableFuture<ReturnT> run(
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java
index 663bea1709..00869c7fc8 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java
@@ -323,7 +323,7 @@ public class BookieShell implements Tool {
}
/**
- * Intializes new cluster by creating required znodes for the cluster. If
+ * Initializes new cluster by creating required znodes for the cluster. If
* ledgersrootpath is already existing then it will error out. If for any
* reason it errors out while creating znodes for the cluster, then before
* running initnewcluster again, try nuking existing cluster by running
@@ -704,7 +704,7 @@ public class BookieShell implements Tool {
ReadLedgerCommand cmd = new ReadLedgerCommand(entryFormatter,
ledgerIdFormatter);
ReadLedgerCommand.ReadLedgerFlags flags = new
ReadLedgerCommand.ReadLedgerFlags();
- flags.bookieAddresss(bookieAddress);
+ flags.bookieAddress(bookieAddress);
flags.firstEntryId(firstEntry);
flags.forceRecovery(forceRecovery);
flags.lastEntryId(lastEntry);
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java
index 6846b6b27f..ca6224ea83 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java
@@ -152,7 +152,7 @@ class EntryLogManagerForEntryLogPerLedger extends
EntryLogManagerBase {
* 'expiry duration' and 'maximumSize' will be set to
* entryLogPerLedgerCounterLimitsMultFactor times of
* 'ledgerIdEntryLogMap' cache limits. This is needed because entries
- * from 'ledgerIdEntryLogMap' can be removed from cache becasue of
+ * from 'ledgerIdEntryLogMap' can be removed from cache because of
* accesstime expiry or cache size limits, but to know the actual
number
* of entrylogs per ledger, we should maintain this count for long
time.
*/
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java
index aec2fb1cd0..70b76aaf37 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java
@@ -137,8 +137,8 @@ class EntryLoggerAllocator {
}
}
- void setWritingLogId(long lodId) {
- this.writingLogId = lodId;
+ void setWritingLogId(long logId) {
+ this.writingLogId = logId;
}
void setWritingCompactingLogId(long logId) {
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java
index 323f7012bc..4c6b7a9ee4 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java
@@ -569,7 +569,7 @@ public class InterleavedLedgerStorage implements
CompactableLedgerStorage, Entry
// for interleaved ledger storage, we request a checkpoint when
rotating a entry log file.
// the checkpoint represent the point that all the entries added
before this point are already
// in ledger storage and ready to be synced to disk.
- // TODO: we could consider remove checkpointSource and
checkpointSouce#newCheckpoint
+ // TODO: we could consider remove checkpointSource and
checkpointSource#newCheckpoint
// later if we provide kind of LSN (Log/Journal Squeuence Number)
// mechanism when adding entry. {@link
https://github.com/apache/bookkeeper/issues/279}
Checkpoint checkpoint = checkpointSource.newCheckpoint();
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java
index 42910b0248..6689c96df5 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java
@@ -200,20 +200,20 @@ public class LedgerDirsManager {
List<File> getDirsAboveUsableThresholdSize(long thresholdSize, boolean
loggingNoWritable)
throws NoWritableLedgerDirException {
- List<File> fullLedgerDirsToAccomodate = new ArrayList<File>();
+ List<File> fullLedgerDirsToAccommodate = new ArrayList<File>();
for (File dir: this.ledgerDirectories) {
// Pick dirs which can accommodate little more than thresholdSize
if (dir.getUsableSpace() > thresholdSize) {
- fullLedgerDirsToAccomodate.add(dir);
+ fullLedgerDirsToAccommodate.add(dir);
}
}
- if (!fullLedgerDirsToAccomodate.isEmpty()) {
+ if (!fullLedgerDirsToAccommodate.isEmpty()) {
if (loggingNoWritable) {
LOG.info("No writable ledger dirs below diskUsageThreshold. "
- + "But Dirs that can accommodate {} are: {}",
thresholdSize, fullLedgerDirsToAccomodate);
+ + "But Dirs that can accommodate {} are: {}",
thresholdSize, fullLedgerDirsToAccommodate);
}
- return fullLedgerDirsToAccomodate;
+ return fullLedgerDirsToAccommodate;
}
// We will reach here when we find no ledgerDir which has atleast
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java
index 7ad8ba1e0c..32321aee27 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java
@@ -124,13 +124,13 @@ class LedgerDirsMonitor {
}
}
- List<File> fullfilledDirs = new
ArrayList<File>(ldm.getFullFilledLedgerDirs());
+ List<File> fulfilledDirs = new
ArrayList<File>(ldm.getFullFilledLedgerDirs());
boolean makeWritable = ldm.hasWritableLedgerDirs();
// When bookie is in READONLY mode, i.e there are no
writableLedgerDirs:
- // - Update fullfilledDirs disk usage.
+ // - Update fulfilledDirs disk usage.
// - If the total disk usage is below DiskLowWaterMarkUsageThreshold
- // add fullfilledDirs back to writableLedgerDirs list if their usage
is < conf.getDiskUsageThreshold.
+ // add fulfilledDirs back to writableLedgerDirs list if their usage is
< conf.getDiskUsageThreshold.
try {
if (!makeWritable) {
float totalDiskUsage =
diskChecker.getTotalDiskUsage(ldm.getAllLedgerDirs());
@@ -144,7 +144,7 @@ class LedgerDirsMonitor {
}
}
// Update all full-filled disk space usage
- for (File dir : fullfilledDirs) {
+ for (File dir : fulfilledDirs) {
try {
diskUsages.put(dir, diskChecker.checkDir(dir));
if (makeWritable) {
@@ -254,7 +254,7 @@ class LedgerDirsMonitor {
private void validateThreshold(float diskSpaceThreshold, float
diskSpaceLwmThreshold) {
if (diskSpaceThreshold <= 0 || diskSpaceThreshold >= 1 ||
diskSpaceLwmThreshold - diskSpaceThreshold > 1e-6) {
- throw new IllegalArgumentException("Disk space threashold: "
+ throw new IllegalArgumentException("Disk space threshold: "
+ diskSpaceThreshold + " and lwm threshold: " +
diskSpaceLwmThreshold
+ " are not valid. Should be > 0 and < 1 and
diskSpaceThreshold >= diskSpaceLwmThreshold");
}
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java
index ab2bc33cb4..1d850c456c 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java
@@ -41,7 +41,7 @@ import org.slf4j.LoggerFactory;
*
* <p>Uses the specified amount of memory and pairs it with a hashmap.
*
- * <p>The memory is splitted in multiple segments that are used in a
+ * <p>The memory is split in multiple segments that are used in a
* ring-buffer fashion. When the read cache is full, the oldest segment
* is cleared and rotated to make space for new entries to be added to
* the read cache.
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java
index d7043dc8c9..751d40ef53 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java
@@ -706,7 +706,7 @@ public class BookKeeper implements
org.apache.bookkeeper.client.api.BookKeeper {
* cheap to compute but does not protect against byzantine bookies (i.e., a
* bookie might report fake bytes and a matching CRC32). The MAC code is
more
* expensive to compute, but is protected by a password, i.e., a bookie
can't
- * report fake bytes with a mathching MAC unless it knows the password.
+ * report fake bytes with a matching MAC unless it knows the password.
* The CRC32C, which use SSE processor instruction, has better performance
than CRC32.
* Legacy DigestType for backward compatibility. If we want to add new
DigestType,
* we should add it in here, client.api.DigestType and DigestType in
DataFormats.proto.
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperAdmin.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperAdmin.java
index 1df915f650..371c214532 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperAdmin.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperAdmin.java
@@ -1297,7 +1297,7 @@ public class BookKeeperAdmin implements AutoCloseable {
}
/**
- * Intializes new cluster by creating required znodes for the cluster. If
+ * Initializes new cluster by creating required znodes for the cluster. If
* ledgersrootpath is already existing then it will error out.
*
* @param conf
@@ -1569,7 +1569,7 @@ public class BookKeeperAdmin implements AutoCloseable {
* Triggers AuditTask by resetting lostBookieRecoveryDelay and then make
* sure the ledgers stored in the given decommissioning bookie are properly
* replicated and they are not underreplicated because of the given bookie.
- * This method waits untill there are no underreplicatedledgers because of
this
+ * This method waits until there are no underreplicatedledgers because of
this
* bookie. If the given Bookie is not shutdown yet, then it will throw
* BKIllegalOpException.
*
@@ -1612,7 +1612,7 @@ public class BookKeeperAdmin implements AutoCloseable {
Set<Long> ledgersStoredInThisBookie =
bookieToLedgersMap.get(bookieAddress.toString());
if ((ledgersStoredInThisBookie != null) &&
(!ledgersStoredInThisBookie.isEmpty())) {
/*
- * wait untill all the ledgers are replicated to other
+ * wait until all the ledgers are replicated to other
* bookies by making sure that these ledgers metadata don't
* contain this bookie as part of their ensemble.
*/
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java
index 2646d3abc5..295cbd9faa 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java
@@ -22,7 +22,7 @@ import java.util.Map;
import org.apache.bookkeeper.net.BookieId;
/**
- * This interface determins how entries are distributed among bookies.
+ * This interface determines how entries are distributed among bookies.
*
* <p>Every entry gets replicated to some number of replicas. The first
replica for
* an entry is given a replicaIndex of 0, and so on. To distribute write load,
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java
index 05a687b964..58d2bc0fc4 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java
@@ -369,7 +369,7 @@ public interface EnsemblePlacementPolicy {
*
* <p>The default implementation will pick a bookie randomly from the
ensemble.
* Other placement policies will be able to do better decisions based on
- * additional informations (eg: rack or region awareness).
+ * additional information (eg: rack or region awareness).
*
* @param metadata
* the {@link LedgerMetadata} object
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java
index 3d58bdae36..f6d54e1d02 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java
@@ -233,11 +233,11 @@ public class LedgerFragmentReplicator {
final Set<BookieId> targetBookieAddresses,
final BiConsumer<Long, Long> onReadEntryFailureCallback)
throws InterruptedException {
- Set<LedgerFragment> partionedFragments = splitIntoSubFragments(lh, lf,
+ Set<LedgerFragment> partitionedFragments = splitIntoSubFragments(lh,
lf,
bkc.getConf().getRereplicationEntryBatchSize());
LOG.info("Replicating fragment {} in {} sub fragments.",
- lf, partionedFragments.size());
- replicateNextBatch(lh, partionedFragments.iterator(),
+ lf, partitionedFragments.size());
+ replicateNextBatch(lh, partitionedFragments.iterator(),
ledgerFragmentMcb, targetBookieAddresses,
onReadEntryFailureCallback);
}
@@ -559,7 +559,7 @@ public class LedgerFragmentReplicator {
/**
* Callback for recovery of a single ledger fragment. Once the fragment has
* had all entries replicated, update the ensemble in zookeeper. Once
- * finished propogate callback up to ledgerFragmentsMcb which should be a
+ * finished propagate callback up to ledgerFragmentsMcb which should be a
* multicallback responsible for all fragments in a single ledger
*/
static class SingleFragmentCallback implements AsyncCallback.VoidCallback {
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java
index c49aa65616..6a15cb42f7 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java
@@ -785,7 +785,7 @@ public class LedgerHandle implements WriteHandle {
* Read a sequence of entries asynchronously, allowing to read after the
LastAddConfirmed range.
* <br>This is the same of
* {@link #asyncReadEntries(long, long, ReadCallback, Object) }
- * but it lets the client read without checking the local value of
LastAddConfirmed, so that it is possibile to
+ * but it lets the client read without checking the local value of
LastAddConfirmed, so that it is possible to
* read entries for which the writer has not received the acknowledge yet.
<br>
* For entries which are within the range 0..LastAddConfirmed BookKeeper
guarantees that the writer has successfully
* received the acknowledge.<br>
@@ -1009,7 +1009,7 @@ public class LedgerHandle implements WriteHandle {
* Read a sequence of entries asynchronously, allowing to read after the
LastAddConfirmed range.
* <br>This is the same of
* {@link #asyncReadEntries(long, long, ReadCallback, Object) }
- * but it lets the client read without checking the local value of
LastAddConfirmed, so that it is possibile to
+ * but it lets the client read without checking the local value of
LastAddConfirmed, so that it is possible to
* read entries for which the writer has not received the acknowledge yet.
<br>
* For entries which are within the range 0..LastAddConfirmed BookKeeper
guarantees that the writer has successfully
* received the acknowledge.<br>
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java
index 16443c63d2..1a2b9f0dcd 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java
@@ -37,7 +37,7 @@ class WeightedRandomSelectionImpl<T> implements
WeightedRandomSelection<T> {
Double randomMax;
int maxProbabilityMultiplier;
Map<T, WeightedObject> map;
- TreeMap<Double, T> cummulativeMap = new TreeMap<Double, T>();
+ TreeMap<Double, T> cumulativeMap = new TreeMap<Double, T>();
ReadWriteLock rwLock = new ReentrantReadWriteLock(true);
WeightedRandomSelectionImpl() {
@@ -120,10 +120,10 @@ class WeightedRandomSelectionImpl<T> implements
WeightedRandomSelection<T> {
// The probability of picking a bookie randomly is
defaultPickProbability
// but we change that priority by looking at the weight that each
bookie
// carries.
- TreeMap<Double, T> tmpCummulativeMap = new TreeMap<Double, T>();
+ TreeMap<Double, T> tmpCumulativeMap = new TreeMap<Double, T>();
Double key = 0.0;
for (Map.Entry<T, Double> e : weightMap.entrySet()) {
- tmpCummulativeMap.put(key, e.getKey());
+ tmpCumulativeMap.put(key, e.getKey());
if (LOG.isDebugEnabled()) {
LOG.debug("Key: {} Value: {} AssignedKey: {} AssignedWeight:
{}",
e.getKey(), e.getValue(), key, e.getValue());
@@ -134,7 +134,7 @@ class WeightedRandomSelectionImpl<T> implements
WeightedRandomSelection<T> {
rwLock.writeLock().lock();
try {
this.map = map;
- cummulativeMap = tmpCummulativeMap;
+ cumulativeMap = tmpCumulativeMap;
randomMax = key;
} finally {
rwLock.writeLock().unlock();
@@ -148,8 +148,8 @@ class WeightedRandomSelectionImpl<T> implements
WeightedRandomSelection<T> {
// pick a random number between 0 and randMax
Double randomNum = randomMax * Math.random();
// find the nearest key in the map corresponding to the randomNum
- Double key = cummulativeMap.floorKey(randomNum);
- return cummulativeMap.get(key);
+ Double key = cumulativeMap.floorKey(randomNum);
+ return cumulativeMap.get(key);
} finally {
rwLock.readLock().unlock();
}
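For readers unfamiliar with the cumulative-map pattern this class relies on, the following is a minimal, self-contained sketch (illustrative only, not the BookKeeper class; all names are invented) of weighted random selection with a TreeMap and floorKey:

    import java.util.Map;
    import java.util.TreeMap;
    import java.util.concurrent.ThreadLocalRandom;

    public class WeightedPickSketch {
        // Returns a key with probability proportional to its weight.
        static <T> T pick(Map<T, Double> weights) {
            TreeMap<Double, T> cumulative = new TreeMap<>();
            double total = 0.0;
            for (Map.Entry<T, Double> e : weights.entrySet()) {
                cumulative.put(total, e.getKey()); // entry owns the interval [total, total + weight)
                total += e.getValue();
            }
            double r = ThreadLocalRandom.current().nextDouble(total);
            return cumulative.get(cumulative.floorKey(r)); // greatest interval start <= r
        }

        public static void main(String[] args) {
            Map<String, Double> weights = Map.of("bookie-1", 1.0, "bookie-2", 3.0);
            System.out.println(pick(weights)); // prints "bookie-2" about three times as often
        }
    }

Building the cumulative map is O(n); each subsequent pick is a single O(log n) floorKey lookup, which is the point of keeping the cumulative map around between picks.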
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java
index a0c09a3c92..9b843a1711 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java
@@ -53,7 +53,7 @@ public interface OpenBuilder extends OpBuilder<ReadHandle> {
OpenBuilder withRecovery(boolean recovery);
/**
- * Sets the password to be used to open the ledger. It defauls to an empty
password
+ * Sets the password to be used to open the ledger. It defaults to an
empty password
*
* @param password the password to unlock the ledger
*
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java
index e9bcddd0b3..8e2e633a35 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java
@@ -95,7 +95,7 @@ public interface ReadHandle extends Handle {
* Read a sequence of entries asynchronously, allowing to read after the
LastAddConfirmed range.
* <br>This is the same of
* {@link #read(long, long) }
- * but it lets the client read without checking the local value of
LastAddConfirmed, so that it is possibile to
+ * but it lets the client read without checking the local value of
LastAddConfirmed, so that it is possible to
* read entries for which the writer has not received the acknowledge yet.
<br>
* For entries which are within the range 0..LastAddConfirmed BookKeeper
guarantees that the writer has successfully
* received the acknowledge.<br>
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java
index 7fd80a0637..9c33ecfbb1 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java
@@ -29,7 +29,7 @@ import
org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
/**
- * Provide write access to a ledger. Using WriteAdvHandler the writer MUST
explictly set an entryId. Beware that the
+ * Provide write access to a ledger. Using WriteAdvHandler the writer MUST
explicitly set an entryId. Beware that the
* write for a given entryId will be acknowledged if and only if all entries
up to entryId - 1 have been acknowledged
* too (expected from entryId 0)
*
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java
index feae692465..cb942d99b2 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java
@@ -980,7 +980,7 @@ public class ClientConfiguration extends
AbstractConfiguration<ClientConfigurati
}
/**
- * Multipler to use when determining time between successive speculative
read requests.
+ * Multiplier to use when determining time between successive speculative
read requests.
*
* @return speculative read timeout backoff multiplier.
*/
@@ -989,10 +989,10 @@ public class ClientConfiguration extends
AbstractConfiguration<ClientConfigurati
}
/**
- * Set the multipler to use when determining time between successive
speculative read requests.
+ * Set the multiplier to use when determining time between successive
speculative read requests.
*
* @param speculativeReadTimeoutBackoffMultiplier
- * multipler to use when determining time between successive
speculative read requests.
+ * multiplier to use when determining time between successive
speculative read requests.
* @return client configuration.
*/
public ClientConfiguration setSpeculativeReadTimeoutBackoffMultiplier(
@@ -1002,7 +1002,7 @@ public class ClientConfiguration extends
AbstractConfiguration<ClientConfigurati
}
/**
- * Multipler to use when determining time between successive speculative
read LAC requests.
+ * Multiplier to use when determining time between successive speculative
read LAC requests.
*
* @return speculative read LAC timeout backoff multiplier.
*/
@@ -1011,10 +1011,10 @@ public class ClientConfiguration extends
AbstractConfiguration<ClientConfigurati
}
/**
- * Set the multipler to use when determining time between successive
speculative read LAC requests.
+ * Set the multiplier to use when determining time between successive
speculative read LAC requests.
*
* @param speculativeReadLACTimeoutBackoffMultiplier
- * multipler to use when determining time between successive
speculative read LAC requests.
+ * multiplier to use when determining time between successive
speculative read LAC requests.
* @return client configuration.
*/
public ClientConfiguration setSpeculativeReadLACTimeoutBackoffMultiplier(
@@ -1193,7 +1193,7 @@ public class ClientConfiguration extends
AbstractConfiguration<ClientConfigurati
* preference) to read all entries for a ledger.
*
* <p>Having all the read to one bookie will increase the chances that
- * a read request will be fullfilled by Bookie read cache (or OS file
+ * a read request will be fulfilled by Bookie read cache (or OS file
* system cache) when doing sequential reads.
*
* @param enabled the flag to enable/disable sticky reads.
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java
index 51e45dc6f8..7785bfae0b 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java
@@ -409,7 +409,7 @@ public class ServerConfiguration extends
AbstractConfiguration<ServerConfigurati
/**
* Get Garbage collection wait time. Default value is 10 minutes.
* The guideline is not to set a too low value for this, if using
zookeeper based
- * ledger manager. And it would be nice to align with the average lifecyle
time of
+ * ledger manager. And it would be nice to align with the average
lifecycle time of
* ledgers in the system.
*
* @return gc wait time
@@ -1223,7 +1223,7 @@ public class ServerConfiguration extends
AbstractConfiguration<ServerConfigurati
* Configure the bookie to advertise a specific BookieId.
*
* <p>By default, a bookie will advertise a BookieId computed
- * from the primary network endpoint addresss.
+ * from the primary network endpoint address.
*
* @see #getBookieId()
* @see #setAdvertisedAddress(java.lang.String)
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java
index 2f143c07f6..cca631086b 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java
@@ -369,7 +369,7 @@ public class ZKRegistrationClient implements
RegistrationClient {
.collect(bookieInfoUpdated)
.whenComplete((List<Versioned<BookieServiceInfo>>
info, Throwable error) -> {
// we are ignoring errors intentionally
- // there could be bookies that publish unparseable
information
+ // there could be bookies that publish unparsable
information
// or other temporary/permanent errors
future.complete(new Versioned<>(bookies, version));
});
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java
index bb6d1db091..797f5badee 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java
@@ -28,16 +28,16 @@ public class SettableFeature extends FixedValueFeature {
super(name, initialAvailability);
}
- public SettableFeature(String name, boolean isAvailabile) {
- super(name, isAvailabile);
+ public SettableFeature(String name, boolean isAvailable) {
+ super(name, isAvailable);
}
public void set(int availability) {
this.availability = availability;
}
- public void set(boolean isAvailabile) {
- this.availability = isAvailabile ? FEATURE_AVAILABILITY_MAX_VALUE : 0;
+ public void set(boolean isAvailable) {
+ this.availability = isAvailable ? FEATURE_AVAILABILITY_MAX_VALUE : 0;
}
}
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractHierarchicalLedgerManager.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractHierarchicalLedgerManager.java
index c7a730cc73..4a1ad27bb5 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractHierarchicalLedgerManager.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractHierarchicalLedgerManager.java
@@ -190,7 +190,7 @@ public abstract class AbstractHierarchicalLedgerManager
extends AbstractZkLedger
NavigableSet<Long> zkActiveLedgers = new TreeSet<Long>();
if (!path.startsWith(ledgerRootPath)) {
- LOG.warn("Ledger path [{}] is not a valid path name, it should
start wth {}", path, ledgerRootPath);
+ LOG.warn("Ledger path [{}] is not a valid path name, it should
start with {}", path, ledgerRootPath);
return zkActiveLedgers;
}
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractZkLedgerManager.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractZkLedgerManager.java
index 102c7d7792..59b17be922 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractZkLedgerManager.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractZkLedgerManager.java
@@ -553,7 +553,7 @@ public abstract class AbstractZkLedgerManager implements
LedgerManager, Watcher
* Process ledgers in a single zk node.
*
* <p>
- * for each ledger found in this zk node, processor#process(ledgerId) will
be triggerred
+ * for each ledger found in this zk node, processor#process(ledgerId) will
be triggered
* to process a specific ledger. after all ledgers has been processed, the
finalCb will
* be called with provided context object. The RC passed to finalCb is
decided by :
* <ul>
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerManager.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerManager.java
index 2f08c2e5b7..db197ce02e 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerManager.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerManager.java
@@ -97,7 +97,7 @@ public interface LedgerManager extends Closeable {
* @param currentVersion
* The version of the metadata we expect to be overwriting.
* @return Future which, when completed, contains the newly written
metadata.
- * Comleted with an exceptione:<ul>
+ * Completed with an exception:<ul>
* <li>{@link
org.apache.bookkeeper.client.BKException.BKMetadataVersionException}
* if version in metadata doesn't match</li>
* <li>{@link
org.apache.bookkeeper.client.BKException.ZKException} for other issue</li>
@@ -130,9 +130,9 @@ public interface LedgerManager extends Closeable {
* Loop to process all ledgers.
* <p>
* <ul>
- * After all ledgers were processed, finalCb will be triggerred:
+ * After all ledgers were processed, finalCb will be triggered:
* <li> if all ledgers are processed done with OK, success rc will be
passed to finalCb.
- * <li> if some ledgers are prcoessed failed, failure rc will be passed to
finalCb.
+ * <li> if some ledgers are processed failed, failure rc will be passed to
finalCb.
* </ul>
* </p>
*
@@ -145,7 +145,7 @@ public interface LedgerManager extends Closeable {
* @param successRc
* Success RC code passed to finalCb when callback
* @param failureRc
- * Failure RC code passed to finalCb when exceptions occured.
+ * Failure RC code passed to finalCb when exceptions occurred.
*/
void asyncProcessLedgers(Processor<Long> processor,
AsyncCallback.VoidCallback finalCb,
Object context, int successRc, int
failureRc);
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerUnderreplicationManager.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerUnderreplicationManager.java
index 256f7814d7..64548fbd13 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerUnderreplicationManager.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerUnderreplicationManager.java
@@ -49,7 +49,7 @@ public interface LedgerUnderreplicationManager extends
AutoCloseable {
/**
* Mark a ledger as underreplicated with missing bookies. The replication
should then
- * check which fragements are underreplicated and rereplicate them.
+ * check which fragments are underreplicated and rereplicate them.
*
* @param ledgerId ledger id
* @param missingReplicas missing replicas
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/MSLedgerManagerFactory.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/MSLedgerManagerFactory.java
index fa4776bd28..bc9ae3a35a 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/MSLedgerManagerFactory.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/MSLedgerManagerFactory.java
@@ -488,7 +488,7 @@ public class MSLedgerManagerFactory extends
AbstractZkLedgerManagerFactory {
@Override
public void complete(int rc, Version version, Object ctx) {
if (MSException.Code.BadVersion.getCode() == rc) {
- LOG.info("Bad version provided to updat metadata for
ledger {}", ledgerId);
+ LOG.info("Bad version provided to update metadata for
ledger {}", ledgerId);
promise.completeExceptionally(new
BKException.BKMetadataVersionException());
} else if (MSException.Code.NoKey.getCode() == rc) {
LOG.warn("Ledger {} doesn't exist when writing its
ledger metadata.", ledgerId);
@@ -761,7 +761,7 @@ public class MSLedgerManagerFactory extends
AbstractZkLedgerManagerFactory {
try {
MetastoreUtils.cleanTable(ledgerTable,
conf.getMetastoreMaxEntriesPerScan());
} catch (MSException mse) {
- throw new IOException("Exception when cleanning up table " +
TABLE_NAME, mse);
+ throw new IOException("Exception when cleaning up table " +
TABLE_NAME, mse);
}
LOG.info("Finished cleaning up table {}.", TABLE_NAME);
// Delete and recreate the LAYOUT information.
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/ZkLedgerUnderreplicationManager.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/ZkLedgerUnderreplicationManager.java
index fd43fcfa3e..4118e19131 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/ZkLedgerUnderreplicationManager.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/ZkLedgerUnderreplicationManager.java
@@ -73,7 +73,7 @@ import org.slf4j.LoggerFactory;
/**
* ZooKeeper implementation of underreplication manager.
- * This is implemented in a heirarchical fashion, so it'll work with
+ * This is implemented in a hierarchical fashion, so it'll work with
* FlatLedgerManagerFactory and HierarchicalLedgerManagerFactory.
*
* <p>Layout is:
@@ -82,7 +82,7 @@ import org.slf4j.LoggerFactory;
* locks/(ledgerId)
*
* <p>The hierarchical path is created by splitting the ledger into 4 2byte
- * segments which are represented in hexidecimal.
+ * segments which are represented in hexadecimal.
* e.g. For ledger id 0xcafebeef0000feed, the path is
* cafe/beef/0000/feed/
*/
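To make the path layout above concrete, here is a small illustrative sketch (behaviour inferred from the comment, not the ZkLedgerUnderreplicationManager code) that splits a ledger id into the four two-byte hexadecimal segments:

    public class UnderreplicationPathSketch {
        // Formats a ledger id as the 4 x 2-byte hexadecimal segments described above.
        static String toHierarchicalPath(long ledgerId) {
            String hex = String.format("%016x", ledgerId); // 8 bytes -> 16 hex digits
            return hex.substring(0, 4) + "/" + hex.substring(4, 8) + "/"
                    + hex.substring(8, 12) + "/" + hex.substring(12, 16) + "/";
        }

        public static void main(String[] args) {
            // Matches the example in the comment: cafe/beef/0000/feed/
            System.out.println(toHierarchicalPath(0xcafebeef0000feedL));
        }
    }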
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java
index 3fbcf538d9..f0d2c3bc74 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java
@@ -40,7 +40,7 @@ public abstract class MSException extends Exception {
InterruptedException (-100, "Operation interrupted"),
IllegalOp (-101, "Illegal operation"),
ServiceDown (-102, "Metadata service is down"),
- OperationFailure(-103, "Operaion failed on metadata storage server
side");
+ OperationFailure(-103, "Operation failed on metadata storage server
side");
private static final Map<Integer, Code> codes = new HashMap<Integer,
Code>();
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java
index 8d7c5d4a7c..04eb74f2eb 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java
@@ -31,7 +31,7 @@ public interface MetaStore {
String getName();
/**
- * Get the plugin verison.
+ * Get the plugin version.
*
* @return the plugin version.
*/
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/net/NodeBase.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/net/NodeBase.java
index 46442f50c1..8e0233ad16 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/net/NodeBase.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/net/NodeBase.java
@@ -48,7 +48,7 @@ public class NodeBase implements Node {
/**
* Construct a node from its path.
* @param path
- * a concatenation of this node's location, the path seperator, and its
name
+ * a concatenation of this node's location, the path separator, and its
name
*/
public NodeBase(String path) {
path = normalize(path);
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java
index d8bfb4257a..0c3b7bf8e8 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java
@@ -475,7 +475,7 @@ public class BookieProtoEncoding {
// a heap buffer while serializing and pass it down to netty library.
// In AbstractChannel#filterOutboundMessage(), netty copies that data
to a direct buffer if
// it is currently in heap (otherwise skips it and uses it directly).
- // Allocating a direct buffer reducing unncessary CPU cycles for
buffer copies in BK client
+ // Allocating a direct buffer reducing unnecessary CPU cycles for
buffer copies in BK client
// and also helps alleviate pressure off the GC, since there is less
memory churn.
// Bookies aren't usually CPU bound. This change improves READ_ENTRY
code paths by a small factor as well.
ByteBuf buf = allocator.directBuffer(frameSize, frameSize);
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java
index 0e5335ca42..5fe1a6e2ef 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java
@@ -588,7 +588,7 @@ public class PerChannelBookieClient extends
ChannelInboundHandlerAdapter {
}
// In the netty pipeline, we need to split packets based on length, so
we
- // use the {@link LengthFieldBasedFramDecoder}. Other than that all
actions
+ // use the {@link LengthFieldBasedFrameDecoder}. Other than that all
actions
// are carried out in this class, e.g., making sense of received
messages,
// prepending the length to outgoing packets etc.
bootstrap.handler(new ChannelInitializer<Channel>() {
@@ -2373,7 +2373,7 @@ public class PerChannelBookieClient extends
ChannelInboundHandlerAdapter {
}
}
- // visable for testing
+ // visible for testing
CompletionKey newCompletionKey(long txnId, OperationType operationType) {
return new TxnCompletionKey(txnId, operationType);
}
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessorV3.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessorV3.java
index fb0b4f62a8..3a85ca0949 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessorV3.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessorV3.java
@@ -200,7 +200,7 @@ class ReadEntryProcessorV3 extends PacketProcessorBaseV3 {
.setLedgerId(ledgerId)
.setEntryId(entryId);
try {
- // handle fence reqest
+ // handle fence request
if (RequestUtils.isFenceRequest(readRequest)) {
LOG.info("Ledger fence request received for ledger: {} from
address: {}", ledgerId,
channel.remoteAddress());
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/WriteEntryProcessorV3.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/WriteEntryProcessorV3.java
index 381b887a16..3aaa9219f9 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/WriteEntryProcessorV3.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/WriteEntryProcessorV3.java
@@ -126,7 +126,7 @@ class WriteEntryProcessorV3 extends PacketProcessorBaseV3 {
status = StatusCode.EOK;
} catch (OperationRejectedException e) {
requestProcessor.getRequestStats().getAddEntryRejectedCounter().inc();
- // Avoid to log each occurence of this exception as this can
happen when the ledger storage is
+ // Avoid to log each occurrence of this exception as this can
happen when the ledger storage is
// unable to keep up with the write rate.
if (logger.isDebugEnabled()) {
logger.debug("Operation rejected while writing {}", request,
e);
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java
index cf1f2f2f59..c32487a30b 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java
@@ -558,7 +558,7 @@ public class ReplicationWorker implements Runnable {
*
* <p>To avoid this situation, we need to check if bookies in the final
open ensemble
* are unavailable, and take action if so. The action to take is to close
the ledger,
- * after a grace period as the writting client may replace the faulty
bookie on its
+ * after a grace period as the writing client may replace the faulty
bookie on its
* own.
*
* <p>Missing bookies in closed ledgers are fine, as we know the last
confirmed add, so
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java
index ed73293a54..94866877dc 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java
@@ -89,7 +89,7 @@ public class LedgerOutputStream extends OutputStream {
try {
lh.addEntry(b);
} catch (InterruptedException ie) {
- LOG.warn("Interrupted while flusing " + ie);
+ LOG.warn("Interrupted while flushing " + ie);
Thread.currentThread().interrupt();
} catch (BKException bke) {
LOG.warn("BookKeeper exception ", bke);
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tls/TLSContextFactory.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tls/TLSContextFactory.java
index 03f9951d8c..7570436b95 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tls/TLSContextFactory.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tls/TLSContextFactory.java
@@ -263,7 +263,7 @@ public class TLSContextFactory implements
SecurityHandlerFactory {
// get key-file and trust-file locations and passwords
if (!(config instanceof ClientConfiguration)) {
- throw new SecurityException("Client configruation not provided");
+ throw new SecurityException("Client configuration not provided");
}
clientConf = (ClientConfiguration) config;
@@ -387,7 +387,7 @@ public class TLSContextFactory implements
SecurityHandlerFactory {
// get key-file and trust-file locations and passwords
if (!(config instanceof ServerConfiguration)) {
- throw new SecurityException("Server configruation not provided");
+ throw new SecurityException("Server configuration not provided");
}
serverConf = (ServerConfiguration) config;
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/LedgerCommand.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/LedgerCommand.java
index fa753e9f47..7434706b9f 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/LedgerCommand.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/LedgerCommand.java
@@ -136,8 +136,8 @@ public class LedgerCommand extends
BookieCommand<LedgerCommand.LedgerFlags> {
private boolean dumpLedgerInfo(long ledgerId, ServerConfiguration conf) {
try {
- DbLedgerStorage.readLedgerIndexEntries(ledgerId, conf,
(currentEntry, entryLodId, position) -> System.out
- .println("entry " + currentEntry + "\t:\t(log: " +
entryLodId + ", pos: " + position + ")"));
+ DbLedgerStorage.readLedgerIndexEntries(ledgerId, conf,
(currentEntry, entryLogId, position) -> System.out
+ .println("entry " + currentEntry + "\t:\t(log: " +
entryLogId + ", pos: " + position + ")"));
} catch (IOException e) {
System.err.printf("ERROR: initializing dbLedgerStorage %s",
e.getMessage());
return false;
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/ReadLedgerCommand.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/ReadLedgerCommand.java
index 20b4232e31..e07dc9d9d9 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/ReadLedgerCommand.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/ReadLedgerCommand.java
@@ -109,7 +109,7 @@ public class ReadLedgerCommand extends
BookieCommand<ReadLedgerCommand.ReadLedge
private boolean forceRecovery;
@Parameter(names = { "-b", "--bookie" }, description = "Only read from
a specific bookie")
- private String bookieAddresss;
+ private String bookieAddress;
@Parameter(names = { "-lf", "--ledgeridformatter" }, description =
"Set ledger id formatter")
private String ledgerIdFormatter;
@@ -146,9 +146,9 @@ public class ReadLedgerCommand extends
BookieCommand<ReadLedgerCommand.ReadLedge
long lastEntry = flags.lastEntryId;
final BookieId bookie;
- if (flags.bookieAddresss != null) {
+ if (flags.bookieAddress != null) {
// A particular bookie was specified
- bookie = BookieId.parse(flags.bookieAddresss);
+ bookie = BookieId.parse(flags.bookieAddress);
} else {
bookie = null;
}
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/RegenerateInterleavedStorageIndexFileCommand.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/RegenerateInterleavedStorageIndexFileCommand.java
index bbf933fda8..c50e808116 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/RegenerateInterleavedStorageIndexFileCommand.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/RegenerateInterleavedStorageIndexFileCommand.java
@@ -79,7 +79,7 @@ public class RegenerateInterleavedStorageIndexFileCommand
description = "The password in base64 encoding, for cases where
the password is not UTF-8.")
private String b64Password = DEFAULT;
- @Parameter(names = { "-d", "--dryrun" }, description = "Process the
entryLogger, but don't write anthing.")
+ @Parameter(names = { "-d", "--dryrun" }, description = "Process the
entryLogger, but don't write anything.")
private boolean dryRun;
@Parameter(names = { "-l", "--ledgerids" },
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/client/LedgerMetaDataCommand.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/client/LedgerMetaDataCommand.java
index 4bbb50e7cc..430c0dde21 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/client/LedgerMetaDataCommand.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/client/LedgerMetaDataCommand.java
@@ -137,7 +137,7 @@ public class LedgerMetaDataCommand extends
BookieCommand<LedgerMetaDataCommand.L
throw be;
}
m.writeLedgerMetadata(flag.ledgerId, md, new
LongVersion(-1L)).join();
- LOG.info("successsfully updated ledger metadata {}",
flag.ledgerId);
+ LOG.info("successfully updated ledger metadata {}",
flag.ledgerId);
}
} else {
printLedgerMetadata(flag.ledgerId,
m.readLedgerMetadata(flag.ledgerId).get().getValue(), true);
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/AvailabilityOfEntriesOfLedger.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/AvailabilityOfEntriesOfLedger.java
index 24361cede9..cf221f5dda 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/AvailabilityOfEntriesOfLedger.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/AvailabilityOfEntriesOfLedger.java
@@ -178,14 +178,14 @@ public class AvailabilityOfEntriesOfLedger {
public AvailabilityOfEntriesOfLedger(PrimitiveIterator.OfLong
entriesOfLedgerItr) {
while (entriesOfLedgerItr.hasNext()) {
-
this.addEntryToAvailabileEntriesOfLedger(entriesOfLedgerItr.nextLong());
+
this.addEntryToAvailableEntriesOfLedger(entriesOfLedgerItr.nextLong());
}
this.closeStateOfEntriesOfALedger();
}
public AvailabilityOfEntriesOfLedger(long[] entriesOfLedger) {
for (long entry : entriesOfLedger) {
- this.addEntryToAvailabileEntriesOfLedger(entry);
+ this.addEntryToAvailableEntriesOfLedger(entry);
}
this.closeStateOfEntriesOfALedger();
}
@@ -310,7 +310,7 @@ public class AvailabilityOfEntriesOfLedger {
}
}
- private void addEntryToAvailabileEntriesOfLedger(long entryId) {
+ private void addEntryToAvailableEntriesOfLedger(long entryId) {
if (!isCurSequenceInitialized()) {
initializeCurSequence(entryId);
} else if (isEntryExistingInCurSequence(entryId)) {
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/DiskChecker.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/DiskChecker.java
index db8d350c97..0cac7b8043 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/DiskChecker.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/DiskChecker.java
@@ -276,7 +276,7 @@ public class DiskChecker {
private void validateThreshold(float diskSpaceThreshold, float
diskSpaceWarnThreshold) {
if (diskSpaceThreshold <= 0 || diskSpaceThreshold >= 1 ||
diskSpaceWarnThreshold - diskSpaceThreshold > 1e-6) {
- throw new IllegalArgumentException("Disk space threashold: "
+ throw new IllegalArgumentException("Disk space threshold: "
+ diskSpaceThreshold + " and warn threshold: " +
diskSpaceWarnThreshold
+ " are not valid. Should be > 0 and < 1 and
diskSpaceThreshold >= diskSpaceWarnThreshold");
}
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/StringUtils.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/StringUtils.java
index 1a752003ef..edbe49989d 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/StringUtils.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/StringUtils.java
@@ -22,7 +22,7 @@ import java.io.IOException;
import org.apache.bookkeeper.proto.BookkeeperProtocol;
/**
- * Provided utilites for parsing network addresses, ledger-id from node paths
+ * Provided utilities for parsing network addresses, ledger-id from node paths
* etc.
*
*/
@@ -163,7 +163,7 @@ public class StringUtils {
}
/**
- * Builds string representation of teh request without extra (i.e. binary)
data
+ * Builds string representation of the request without extra (i.e. binary)
data
*
* @param request
* @return string representation of request
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ZkUtils.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ZkUtils.java
index 6b8ef86cc3..48ea04a3f4 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ZkUtils.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ZkUtils.java
@@ -41,7 +41,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Provided utilites for zookeeper access, etc.
+ * Provided utilities for zookeeper access, etc.
*/
public class ZkUtils {
private static final Logger LOG = LoggerFactory.getLogger(ZkUtils.class);
diff --git
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/SynchronizedHashMultiMap.java
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/SynchronizedHashMultiMap.java
index 942d6da281..db7bd0bf53 100644
---
a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/SynchronizedHashMultiMap.java
+++
b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/SynchronizedHashMultiMap.java
@@ -33,7 +33,7 @@ import org.apache.commons.lang3.tuple.Pair;
*
* <p>Implementation is aimed at storing PerChannelBookieClient completions
when there
* are duplicates. If the key is a pooled object, it must not exist once the
value
- * has been removed from the map, which can happen with guava multimap
implemenations.
+ * has been removed from the map, which can happen with guava multimap
implementations.
*
* <p>With this map is implemented with pretty heavy locking, but this
shouldn't be an
* issue as the multimap only needs to be used in rare cases, i.e. when a user
tries
diff --git
a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java
b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java
index 8e8f5649da..d1182668e9 100644
---
a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java
+++
b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java
@@ -171,7 +171,7 @@ public class HandleFailuresTest {
}
@Test(timeout = 30000)
- public void
testHandlingFailuresMultipleBookieFailImmediatelyNotEnoughoReplace() throws
Exception {
+ public void
testHandlingFailuresMultipleBookieFailImmediatelyNotEnoughToReplace() throws
Exception {
MockClientContext clientCtx = MockClientContext.create();
Versioned<LedgerMetadata> md = ClientUtil.setupLedger(clientCtx, 10L,
LedgerMetadataBuilder.create()
@@ -197,7 +197,7 @@ public class HandleFailuresTest {
}
@Test(timeout = 30000)
- public void
testHandlingFailuresMultipleBookieFailAfterOneEntryNotEnoughoReplace() throws
Exception {
+ public void
testHandlingFailuresMultipleBookieFailAfterOneEntryNotEnoughToReplace() throws
Exception {
MockClientContext clientCtx = MockClientContext.create();
Versioned<LedgerMetadata> md = ClientUtil.setupLedger(clientCtx, 10L,
LedgerMetadataBuilder.create()
diff --git
a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/StaticDNSResolver.java
b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/StaticDNSResolver.java
index 72e61212fb..07da660a87 100644
---
a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/StaticDNSResolver.java
+++
b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/StaticDNSResolver.java
@@ -73,7 +73,7 @@ public class StaticDNSResolver extends
AbstractDNSToSwitchMapping implements Rac
@Override
public List<String> resolve(List<String> names) {
if (getBookieAddressResolver() == null) {
- // test that this istance has been properly initialized
+ // test that this instance has been properly initialized
throw new IllegalStateException("bookieAddressResolver was not
set");
}
List<String> racks = new ArrayList<String>();
diff --git a/conf/bk_server.conf b/conf/bk_server.conf
index ebedf206eb..555e92034c 100644
--- a/conf/bk_server.conf
+++ b/conf/bk_server.conf
@@ -419,14 +419,14 @@ journalDirectories=/tmp/bk-txn
# @Deprecated - `sortedLedgerStorageEnabled` is deprecated in favor of using
`ledgerStorageClass`
# Whether sorted-ledger storage enabled (default true)
-# sortedLedgerStorageEnabled=ture
+# sortedLedgerStorageEnabled=true
# Directory Bookkeeper outputs ledger snapshots
# could define multi directories to store snapshots, separated by ','
# For example:
# ledgerDirectories=/tmp/bk1-data,/tmp/bk2-data
#
-# Ideally ledger dirs and journal dir are each in a differet device,
+# Ideally ledger dirs and journal dir are each in a different device,
# which reduce the contention between random i/o and sequential write.
# It is possible to run with a single disk, but performance will be
significantly lower.
ledgerDirectories=/tmp/bk-data
@@ -636,7 +636,7 @@ gcEntryLogMetadataCacheEnabled=false
# For each ledger dir, maximum disk space which can be used.
# Default is 0.95f. i.e. 95% of disk can be used at most after which nothing
will
-# be written to that partition. If all ledger dir partions are full, then
bookie
+# be written to that partition. If all ledger dir partitions are full, then
bookie
# will turn to readonly mode if 'readOnlyModeEnabled=true' is set, else it will
# shutdown. Bookie will also suspend the minor and major compaction when usage
threshold is exceed
# if `isForceGCAllowWhenNoSpace` is disabled. When the usage becomes lower
than the threshold, the major and minor
@@ -715,7 +715,7 @@ gcEntryLogMetadataCacheEnabled=false
# Size of a index page in ledger cache, in bytes
# A larger index page can improve performance writing page to disk,
-# which is efficent when you have small number of ledgers and these
+# which is efficient when you have small number of ledgers and these
# ledgers have similar number of entries.
# If you have large number of ledgers and each ledger has fewer entries,
# smaller index page would improve memory usage.
@@ -728,7 +728,7 @@ gcEntryLogMetadataCacheEnabled=false
# pageLimit*pageSize should not more than JVM max memory limitation,
# otherwise you would got OutOfMemoryException.
# In general, incrementing pageLimit, using smaller index page would
-# gain bettern performance in lager number of ledgers with fewer entries case
+# gain better performance in lager number of ledgers with fewer entries case
# If pageLimit is -1, bookie server will use 1/3 of JVM memory to compute
# the limitation of number of index pages.
# pageLimit=-1
diff --git a/conf/zookeeper.conf b/conf/zookeeper.conf
index 89c985194c..77e3524079 100644
--- a/conf/zookeeper.conf
+++ b/conf/zookeeper.conf
@@ -58,7 +58,7 @@ maxClientCnxns=100
electionAlg=3
# Leader accepts client connections. Default value is "yes". The leader
-# machine coordinates updates. For higher update throughput at thes slight
+# machine coordinates updates. For higher update throughput at these slight
# expense of read throughput the leader can be configured to not accept
# clients and focus on coordination.
leaderServes=yes
diff --git a/src/owasp-dependency-check-suppressions.xml
b/src/owasp-dependency-check-suppressions.xml
index a141bb39be..337506b241 100644
--- a/src/owasp-dependency-check-suppressions.xml
+++ b/src/owasp-dependency-check-suppressions.xml
@@ -20,7 +20,7 @@
-->
<suppressions
xmlns="https://jeremylong.github.io/DependencyCheck/dependency-suppression.1.3.xsd">
- <!-- add supressions for known vulnerabilities detected by OWASP
Dependency Check -->
+ <!-- add suppressions for known vulnerabilities detected by OWASP
Dependency Check -->
<suppress>
<notes>CVE-2021-43045 affects only .NET distro, see
https://github.com/apache/avro/pull/1357</notes>
diff --git
a/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java
b/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java
index f3a3374f35..e7a65afb3a 100644
---
a/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java
+++
b/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java
@@ -163,7 +163,7 @@ public class FastTimer extends Timer {
/*
* Buckets for percentiles store response times according to the
definition in BUCKET_SPEC in the
- * form of { numerOfBuckets , nanosecondResolutionPerBucket }.
+ * form of { numberOfBuckets , nanosecondResolutionPerBucket }.
*
* BUCKET_SPEC_FINE:
* This bucket definition provides fine-grained timing for small values,
and more coarse-grained timing
diff --git
a/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java
b/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java
index 057fed65c1..1abe85088c 100644
---
a/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java
+++
b/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java
@@ -145,12 +145,12 @@ public class DataSketchesOpStatsLogger implements
OpStatsLogger {
current = replacement;
replacement = local;
- final DoublesUnion aggregateSuccesss = new
DoublesUnionBuilder().build();
+ final DoublesUnion aggregateSuccess = new
DoublesUnionBuilder().build();
final DoublesUnion aggregateFail = new DoublesUnionBuilder().build();
local.map.forEach((localData, b) -> {
long stamp = localData.lock.writeLock();
try {
- aggregateSuccesss.update(localData.successSketch);
+ aggregateSuccess.update(localData.successSketch);
localData.successSketch.reset();
aggregateFail.update(localData.failSketch);
localData.failSketch.reset();
@@ -159,7 +159,7 @@ public class DataSketchesOpStatsLogger implements
OpStatsLogger {
}
});
- successResult = aggregateSuccesss.getResultAndReset();
+ successResult = aggregateSuccess.getResultAndReset();
failResult = aggregateFail.getResultAndReset();
}
diff --git
a/stream/api/src/main/java/org/apache/bookkeeper/api/kv/result/DeleteResult.java
b/stream/api/src/main/java/org/apache/bookkeeper/api/kv/result/DeleteResult.java
index 5950c7e1e7..e49ed1f290 100644
---
a/stream/api/src/main/java/org/apache/bookkeeper/api/kv/result/DeleteResult.java
+++
b/stream/api/src/main/java/org/apache/bookkeeper/api/kv/result/DeleteResult.java
@@ -29,7 +29,7 @@ public interface DeleteResult<K, V> extends Result<K, V> {
/**
* Returns the list of previous kv pairs of the keys
- * deleted in ths op.
+ * deleted in this op.
*
* @return the list of previous kv pairs.
*/
diff --git
a/stream/clients/java/all/src/main/java/org/apache/bookkeeper/clients/StorageClientBuilder.java
b/stream/clients/java/all/src/main/java/org/apache/bookkeeper/clients/StorageClientBuilder.java
index f873e88a7c..572282e74d 100644
---
a/stream/clients/java/all/src/main/java/org/apache/bookkeeper/clients/StorageClientBuilder.java
+++
b/stream/clients/java/all/src/main/java/org/apache/bookkeeper/clients/StorageClientBuilder.java
@@ -67,7 +67,7 @@ public class StorageClientBuilder implements
Supplier<StorageClient> {
* <p>The namespace name will be used for building the stream client for
interacting with streams
* within the namespace.
*
- * @param colName colletion name
+ * @param colName collection name
* @return stream client builder.
* @see #build()
*/
diff --git
a/stream/clients/java/kv/src/main/java/org/apache/bookkeeper/clients/impl/kv/PByteBufTableImpl.java
b/stream/clients/java/kv/src/main/java/org/apache/bookkeeper/clients/impl/kv/PByteBufTableImpl.java
index 1bda9716b3..8b84d96e14 100644
---
a/stream/clients/java/kv/src/main/java/org/apache/bookkeeper/clients/impl/kv/PByteBufTableImpl.java
+++
b/stream/clients/java/kv/src/main/java/org/apache/bookkeeper/clients/impl/kv/PByteBufTableImpl.java
@@ -53,7 +53,7 @@ import org.apache.bookkeeper.common.util.Backoff;
import org.apache.bookkeeper.stream.proto.StreamProperties;
/**
- * The default implemenation of {@link PTable}.
+ * The default implementation of {@link PTable}.
*/
@Slf4j
public class PByteBufTableImpl implements PTable<ByteBuf, ByteBuf> {
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKAsyncLogWriter.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKAsyncLogWriter.java
index 31cbab62d3..da7723a383 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKAsyncLogWriter.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKAsyncLogWriter.java
@@ -98,9 +98,9 @@ class BKAsyncLogWriter extends BKAbstractLogWriter implements
AsyncLogWriter {
}
/**
- * Last pending record in current log segment. After it is satisified, it
would
+ * Last pending record in current log segment. After it is satisfied, it
would
* roll log segment.
- * This implementation is based on the assumption that all future
satisified in same
+ * This implementation is based on the assumption that all future
satisfied in same
* order future pool.
*/
class LastPendingLogRecord extends PendingLogRecord {
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogReadHandler.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogReadHandler.java
index 777e6b1c76..167915a7f0 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogReadHandler.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogReadHandler.java
@@ -85,7 +85,7 @@ import org.slf4j.LoggerFactory;
* <li> `readahead_worker`/notification_execution: opstats. stats on
executions over the notifications received from
* zookeeper.
* <li> `readahead_worker`/metadata_reinitialization: opstats. stats on
metadata reinitialization after receiving
- * notifcation from log segments updates.
+ * notification from log segments updates.
* <li> `readahead_worker`/idle_reader_warn: counter. it increases each time
the readahead worker detects itself
* becoming idle.
* </ul>
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogSegmentWriter.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogSegmentWriter.java
index 34c855a78f..fdf34e275d 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogSegmentWriter.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogSegmentWriter.java
@@ -1298,7 +1298,7 @@ class BKLogSegmentWriter implements LogSegmentWriter,
AddCallback, Runnable, Siz
}
}
- // update last dlsn before satisifying future
+ // update last dlsn before satisfying future
if (BKException.Code.OK == transmitResultUpdater.get(this)) {
DLSN lastDLSNInPacket = recordSet.finalizeTransmit(
logSegmentSequenceNumber, entryId);
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/DistributedLogConfiguration.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/DistributedLogConfiguration.java
index f6d84a908b..7a98fc0135 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/DistributedLogConfiguration.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/DistributedLogConfiguration.java
@@ -782,7 +782,7 @@ public class DistributedLogConfiguration extends
CompositeConfiguration {
//
/**
- * Get BK's zookeeper session timout in milliseconds.
+ * Get BK's zookeeper session timeout in milliseconds.
*
* <p>This is the session timeout applied for zookeeper client used by
bookkeeper client.
* Use {@link #getZKSessionTimeoutMilliseconds()} for zookeeper client used
@@ -1399,7 +1399,7 @@ public class DistributedLogConfiguration extends
CompositeConfiguration {
* Get timeout for shutting down schedulers in dl manager, in milliseconds.
* By default, it is 5 seconds.
*
- * @return timeout for shutting down schedulers in dl manager, in
miliseconds.
+ * @return timeout for shutting down schedulers in dl manager, in
milliseconds.
*/
public int getSchedulerShutdownTimeoutMs() {
return getInt(BKDL_SCHEDULER_SHUTDOWN_TIMEOUT_MS,
BKDL_SCHEDULER_SHUTDOWN_TIMEOUT_MS_DEFAULT);
@@ -1647,7 +1647,7 @@ public class DistributedLogConfiguration extends
CompositeConfiguration {
* </ul>
* By default it is 1.
*
- * @return log segment name verison.
+ * @return log segment name version.
*/
public int getLogSegmentNameVersion() {
return getInt(BKDL_LOGSEGMENT_NAME_VERSION,
BKDL_LOGSEGMENT_NAME_VERSION_DEFAULT);
@@ -3264,7 +3264,7 @@ public class DistributedLogConfiguration extends
CompositeConfiguration {
}
/**
- * Enable check existence of a log if quering local cache of a federated
namespace missed.
+ * Enable check existence of a log if querying local cache of a federated
namespace missed.
*
* @param enabled
* flag to enable/disable this feature.
@@ -3585,7 +3585,7 @@ public class DistributedLogConfiguration extends
CompositeConfiguration {
long readerIdleWarnThresholdMs = getReaderIdleWarnThresholdMillis();
if (readerIdleWarnThresholdMs > 0) { // NOTE: some test cases set the
idle warn threshold to 0
checkArgument(readerIdleWarnThresholdMs > 2 *
getReadLACLongPollTimeout(),
- "Invalid configuration: ReaderIdleWarnThreshold should be
2x larget than readLACLongPollTimeout");
+ "Invalid configuration: ReaderIdleWarnThreshold should be
2x larger than readLACLongPollTimeout");
}
}
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClient.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClient.java
index 91af155a46..ff23ff0782 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClient.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClient.java
@@ -212,7 +212,7 @@ public class ZooKeeperClient {
+ name + " failed on establishing zookeeper connection",
ioe);
}
- // This indicates that the client was explictly closed
+ // This indicates that the client was explicitly closed
if (closed) {
throw new ZooKeeperConnectionException("Client " + name + " has
already been closed");
}
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClientBuilder.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClientBuilder.java
index 755cd82466..716cbc6e01 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClientBuilder.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClientBuilder.java
@@ -51,8 +51,8 @@ public class ZooKeeperClientBuilder {
private String name = "default";
// sessionTimeoutMs
private int sessionTimeoutMs = -1;
- // conectionTimeoutMs
- private int conectionTimeoutMs = -1;
+ // connectionTimeoutMs
+ private int connectionTimeoutMs = -1;
// zkServers
private String zkServers = null;
// retry policy
@@ -92,8 +92,8 @@ public class ZooKeeperClientBuilder {
*/
public synchronized ZooKeeperClientBuilder sessionTimeoutMs(int
sessionTimeoutMs) {
this.sessionTimeoutMs = sessionTimeoutMs;
- if (this.conectionTimeoutMs <= 0) {
- this.conectionTimeoutMs = 2 * sessionTimeoutMs;
+ if (this.connectionTimeoutMs <= 0) {
+ this.connectionTimeoutMs = 2 * sessionTimeoutMs;
}
return this;
}
@@ -116,7 +116,7 @@ public class ZooKeeperClientBuilder {
* @return builder
*/
public synchronized ZooKeeperClientBuilder connectionTimeoutMs(int
connectionTimeoutMs) {
- this.conectionTimeoutMs = connectionTimeoutMs;
+ this.connectionTimeoutMs = connectionTimeoutMs;
return this;
}
@@ -191,8 +191,8 @@ public class ZooKeeperClientBuilder {
private void validateParameters() {
checkNotNull(zkServers, "No zk servers provided.");
- checkArgument(conectionTimeoutMs > 0,
- "Invalid connection timeout : %d", conectionTimeoutMs);
+ checkArgument(connectionTimeoutMs > 0,
+ "Invalid connection timeout : %d", connectionTimeoutMs);
checkArgument(sessionTimeoutMs > 0,
"Invalid session timeout : %d", sessionTimeoutMs);
checkNotNull(statsLogger, "No stats logger provided.");
@@ -222,7 +222,7 @@ public class ZooKeeperClientBuilder {
return new ZooKeeperClient(
name,
sessionTimeoutMs,
- conectionTimeoutMs,
+ connectionTimeoutMs,
zkServers,
retryPolicy,
statsLogger,
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/api/namespace/Namespace.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/api/namespace/Namespace.java
index 1b2a7873e2..f018d8ce0a 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/api/namespace/Namespace.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/api/namespace/Namespace.java
@@ -139,7 +139,7 @@ public interface Namespace extends AutoCloseable{
*
* <p>This method allows the caller to override global configuration
settings by
* supplying log configuration overrides. Log config overrides come in two
flavors,
- * static and dynamic. Static config never changes in the lifecyle of
<code>DistributedLogManager</code>,
+ * static and dynamic. Static config never changes in the lifecycle of
<code>DistributedLogManager</code>,
* dynamic config changes by reloading periodically and safe to access
from any context.</p>
*
* @param logName
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/acl/ZKAccessControlManager.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/acl/ZKAccessControlManager.java
index 9ca3efc6cf..5906226407 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/acl/ZKAccessControlManager.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/acl/ZKAccessControlManager.java
@@ -185,7 +185,7 @@ public class ZKAccessControlManager implements
AccessControlManager, Watcher {
@Override
public void onSuccess(ZKAccessControl
accessControl) {
streamEntries.put(streamName,
accessControl);
- logger.info("Added overrided access
control for stream {} : {}",
+ logger.info("Added override access
control for stream {} : {}",
streamName,
accessControl.getAccessControlEntry());
complete();
}
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/federated/FederatedZKLogMetadataStore.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/federated/FederatedZKLogMetadataStore.java
index 028d329e40..79a746e743 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/federated/FederatedZKLogMetadataStore.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/federated/FederatedZKLogMetadataStore.java
@@ -139,7 +139,7 @@ public class FederatedZKLogMetadataStore
try {
oldLogs = FutureUtils.result(logsFuture);
} catch (Exception e) {
- logger.error("Unexpected exception when getting logs
from a satisified future of {} : ",
+ logger.error("Unexpected exception when getting logs
from a satisfied future of {} : ",
uri, e);
}
logsFuture = new CompletableFuture<Set<String>>();
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKDistributedLock.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKDistributedLock.java
index 4d4c499eeb..3fc32d54c3 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKDistributedLock.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKDistributedLock.java
@@ -313,7 +313,7 @@ public class ZKDistributedLock implements LockListener,
DistributedLock {
/**
* Check if lock is held.
* If not, error out and do not reacquire. Use this in cases where there
are many waiters by default
- * and reacquire is unlikley to succeed.
+ * and reacquire is unlikely to succeed.
*
* @throws LockingException if the lock attempt fails
*/
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKSessionLock.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKSessionLock.java
index 7d7a292681..a0ce6b5b9f 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKSessionLock.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKSessionLock.java
@@ -74,7 +74,7 @@ import org.slf4j.LoggerFactory;
* 1. prepare: create a sequential znode to identify the lock.
* 2. check lock waiters: get all lock waiters to check after prepare. if it
is the first waiter, claim the ownership;
* if it is not the first waiter, but first waiter was itself (same client
id and same session id)
- * claim the ownership too; otherwise, it would set watcher on its sibling
and wait it to disappared.
+ * claim the ownership too; otherwise, it would set watcher on its sibling
and wait it to disappeared.
* </p>
*
* <pre>
@@ -187,7 +187,7 @@ class ZKSessionLock implements SessionLock {
}
/**
- * Convenience class for state management. Provide debuggability features
by tracing unxpected state
+ * Convenience class for state management. Provide debuggability features
by tracing unexpected state
* transitions.
*/
static class StateManagement {
@@ -860,7 +860,7 @@ class ZKSessionLock implements SessionLock {
CompletableFuture<Void> asyncUnlock(final Throwable cause) {
final CompletableFuture<Void> promise = new CompletableFuture<Void>();
- // Use lock executor here rather than lock action, because we want
this opertaion to be applied
+ // Use lock executor here rather than lock action, because we want
this operation to be applied
// whether the epoch has changed or not. The member node is
EPHEMERAL_SEQUENTIAL so there's no
// risk of an ABA problem where we delete and recreate a node and then
delete it again here.
lockStateExecutor.executeOrdered(lockPath, () -> {
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/logsegment/LogSegmentEntryReader.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/logsegment/LogSegmentEntryReader.java
index decae9a850..2ec7445466 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/logsegment/LogSegmentEntryReader.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/logsegment/LogSegmentEntryReader.java
@@ -84,7 +84,7 @@ public interface LogSegmentEntryReader extends AsyncCloseable
{
* <p><i>numEntries</i> will be best-effort.
*
* @param numEntries num entries to read from current log segment
- * @return A promise that when satisified will contain a non-empty list of
entries with their content.
+ * @return A promise that when satisfied will contain a non-empty list of
entries with their content.
* @throw {@link
org.apache.distributedlog.exceptions.EndOfLogSegmentException} when
* read entries beyond the end of a <i>closed</i> log segment.
*/
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/namespace/NamespaceWatcher.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/namespace/NamespaceWatcher.java
index 49da3d4f75..e977040a46 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/namespace/NamespaceWatcher.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/namespace/NamespaceWatcher.java
@@ -53,7 +53,7 @@ public abstract class NamespaceWatcher {
/**
* Watch the namespace changes. It would be triggered each time
- * a namspace listener is added. The implementation should handle
+ * a namespace listener is added. The implementation should handle
* this.
*/
protected abstract void watchNamespaceChanges();
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/tools/DistributedLogTool.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/tools/DistributedLogTool.java
index 9af7f6dff3..6588fc27c8 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/tools/DistributedLogTool.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/tools/DistributedLogTool.java
@@ -916,8 +916,8 @@ import org.slf4j.LoggerFactory;
super("show", "show metadata of a given stream and list segments");
options.addOption("ns", "no-log-segments", false, "Do not list log
segment metadata");
options.addOption("lp", "placement-stats", false, "Show ensemble
placement stats");
- options.addOption("fl", "first-ledger", true, "First log sement
no");
- options.addOption("ll", "last-ledger", true, "Last log sement no");
+ options.addOption("fl", "first-ledger", true, "First log segment
no");
+ options.addOption("ll", "last-ledger", true, "Last log segment
no");
}
@Override
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/DefaultZKOp.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/DefaultZKOp.java
index 22ae7cb9fd..75dab1650e 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/DefaultZKOp.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/DefaultZKOp.java
@@ -24,7 +24,7 @@ import org.apache.zookeeper.OpResult;
/**
- * Default zookeeper operation. No action on commiting or aborting.
+ * Default zookeeper operation. No action on committing or aborting.
*/
public class DefaultZKOp extends ZKOp {
diff --git
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/ZKWatcherManager.java
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/ZKWatcherManager.java
index 1aabd59636..afd06c2f2a 100644
---
a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/ZKWatcherManager.java
+++
b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/ZKWatcherManager.java
@@ -82,9 +82,9 @@ public class ZKWatcherManager implements Watcher {
private final StatsLogger statsLogger;
// Gauges and their labels
private final Gauge<Number> totalWatchesGauge;
- private static final String totalWatchesGauageLabel = "total_watches";
+ private static final String totalWatchesGaugeLabel = "total_watches";
private final Gauge<Number> numChildWatchesGauge;
- private static final String numChildWatchesGauageLabel =
"num_child_watches";
+ private static final String numChildWatchesGaugeLabel =
"num_child_watches";
protected final ConcurrentMap<String, Set<Watcher>> childWatches;
protected final LongAdder allWatchesGauge;
@@ -112,7 +112,7 @@ public class ZKWatcherManager implements Watcher {
return allWatchesGauge.sum();
}
};
- this.statsLogger.registerGauge(totalWatchesGauageLabel,
totalWatchesGauge);
+ this.statsLogger.registerGauge(totalWatchesGaugeLabel,
totalWatchesGauge);
numChildWatchesGauge = new Gauge<Number>() {
@Override
@@ -126,7 +126,7 @@ public class ZKWatcherManager implements Watcher {
}
};
- this.statsLogger.registerGauge(numChildWatchesGauageLabel,
numChildWatchesGauge);
+ this.statsLogger.registerGauge(numChildWatchesGaugeLabel,
numChildWatchesGauge);
}
public Watcher registerChildWatcher(String path, Watcher watcher) {
@@ -169,8 +169,8 @@ public class ZKWatcherManager implements Watcher {
}
public void unregisterGauges() {
- this.statsLogger.unregisterGauge(totalWatchesGauageLabel,
totalWatchesGauge);
- this.statsLogger.unregisterGauge(numChildWatchesGauageLabel,
numChildWatchesGauge);
+ this.statsLogger.unregisterGauge(totalWatchesGaugeLabel,
totalWatchesGauge);
+ this.statsLogger.unregisterGauge(numChildWatchesGaugeLabel,
numChildWatchesGauge);
}
@Override
diff --git a/stream/distributedlog/core/src/test/resources/bk_server.conf
b/stream/distributedlog/core/src/test/resources/bk_server.conf
index 0d3cd56d4c..f094ef36b7 100644
--- a/stream/distributedlog/core/src/test/resources/bk_server.conf
+++ b/stream/distributedlog/core/src/test/resources/bk_server.conf
@@ -94,7 +94,7 @@ openFileLimit=20000
# Size of a index page in ledger cache, in bytes
# A larger index page can improve performance writing page to disk,
-# which is efficent when you have small number of ledgers and these
+# which is efficient when you have small number of ledgers and these
# ledgers have similar number of entries.
# If you have large number of ledgers and each ledger has fewer entries,
# smaller index page would improve memory usage.
@@ -107,7 +107,7 @@ pageSize=8192
# pageLimit*pageSize should not more than JVM max memory limitation,
# otherwise you would got OutOfMemoryException.
# In general, incrementing pageLimit, using smaller index page would
-# gain bettern performance in lager number of ledgers with fewer entries case
+# gain better performance in lager number of ledgers with fewer entries case
# If pageLimit is -1, bookie server will use 1/3 of JVM memory to compute
# the limitation of number of index pages.
pageLimit=131072
diff --git
a/stream/distributedlog/protocol/src/main/java/org/apache/distributedlog/LogRecord.java
b/stream/distributedlog/protocol/src/main/java/org/apache/distributedlog/LogRecord.java
index 4de2249677..7101420675 100644
---
a/stream/distributedlog/protocol/src/main/java/org/apache/distributedlog/LogRecord.java
+++
b/stream/distributedlog/protocol/src/main/java/org/apache/distributedlog/LogRecord.java
@@ -509,7 +509,7 @@ public class LogRecord {
try {
long metadata = in.readLong();
// Reading the first 8 bytes positions the record stream
on the correct log record
- // By this time all components of the DLSN are valid so
this is where we shoud
+ // By this time all components of the DLSN are valid so
this is where we should
// retrieve the currentDLSN and advance to the next
// Given that there are 20 bytes following the read
position of the previous call
// to readLong, we should not have moved ahead in the
stream.
diff --git a/stream/proto/src/main/proto/stream.proto
b/stream/proto/src/main/proto/stream.proto
index b16bd22f29..accab87f2d 100644
--- a/stream/proto/src/main/proto/stream.proto
+++ b/stream/proto/src/main/proto/stream.proto
@@ -83,7 +83,7 @@ message ParentRangesList {
// Stream
//
-// since stream and table are similar and exchangable,
+// since stream and table are similar and exchangeable,
// from the beginning, we shared the metadata management
// between streams and tables and distinguish them using
// a flag that recorded in metadata.
diff --git
a/stream/server/src/main/java/org/apache/bookkeeper/stream/server/service/RegistrationStateService.java
b/stream/server/src/main/java/org/apache/bookkeeper/stream/server/service/RegistrationStateService.java
index afe33accc0..8d5ad8b007 100644
---
a/stream/server/src/main/java/org/apache/bookkeeper/stream/server/service/RegistrationStateService.java
+++
b/stream/server/src/main/java/org/apache/bookkeeper/stream/server/service/RegistrationStateService.java
@@ -88,7 +88,7 @@ public class RegistrationStateService
log.info("Successfully register myself under registration path
{}/{}",
regServiceProvider.getRegistrationPath(),
NetUtils.endpointToString(myEndpoint));
} catch (Exception e) {
- throw new RuntimeException("Failed to intiailize a
registration state service", e);
+ throw new RuntimeException("Failed to initialize a
registration state service", e);
}
}
}
diff --git
a/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/mvcc/MVCCRecord.java
b/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/mvcc/MVCCRecord.java
index 4e071c3625..c8a2c0d983 100644
---
a/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/mvcc/MVCCRecord.java
+++
b/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/mvcc/MVCCRecord.java
@@ -33,7 +33,7 @@ import org.apache.bookkeeper.common.util.Recycled;
import org.apache.bookkeeper.stream.proto.kv.store.ValueType;
/**
- * An object represents the mvcc metdata and value for a given key.
+ * An object represents the mvcc metadata and value for a given key.
*/
@Data
@Setter
diff --git
a/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/rocksdb/checkpoint/CheckpointInfo.java
b/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/rocksdb/checkpoint/CheckpointInfo.java
index 49d06c956c..f38bf28b38 100644
---
a/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/rocksdb/checkpoint/CheckpointInfo.java
+++
b/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/rocksdb/checkpoint/CheckpointInfo.java
@@ -38,7 +38,7 @@ import
org.apache.bookkeeper.stream.proto.kv.store.CheckpointMetadata;
/**
- * CheckpointInfo encapsulated information and operatation for a checkpoint.
+ * CheckpointInfo encapsulated information and operation for a checkpoint.
*/
@Slf4j
public class CheckpointInfo implements Comparable<CheckpointInfo> {
diff --git
a/stream/storage/api/src/main/java/org/apache/bookkeeper/stream/storage/api/cluster/ClusterInitializer.java
b/stream/storage/api/src/main/java/org/apache/bookkeeper/stream/storage/api/cluster/ClusterInitializer.java
index 5808186bbe..f938dee7f7 100644
---
a/stream/storage/api/src/main/java/org/apache/bookkeeper/stream/storage/api/cluster/ClusterInitializer.java
+++
b/stream/storage/api/src/main/java/org/apache/bookkeeper/stream/storage/api/cluster/ClusterInitializer.java
@@ -31,10 +31,10 @@ public interface ClusterInitializer {
* return <tt>true</tt> if they understand the subprotocol specified in
the URI and
* <tt>false</tt> if they do not.
*
- * @param metatadataServiceUri the metadata service uri
+ * @param metadataServiceUri the metadata service uri
* @return <tt>true</tt> if the implementation understands the given URI;
<tt>false</tt> otherwise.
*/
- boolean acceptsURI(URI metatadataServiceUri);
+ boolean acceptsURI(URI metadataServiceUri);
/**
* Create a new cluster under metadata service specified by {@code
metadataServiceUri}.
diff --git
a/stream/storage/impl/src/main/java/org/apache/bookkeeper/stream/storage/impl/cluster/ZkClusterMetadataStore.java
b/stream/storage/impl/src/main/java/org/apache/bookkeeper/stream/storage/impl/cluster/ZkClusterMetadataStore.java
index 676da4e339..0d2fd45c47 100644
---
a/stream/storage/impl/src/main/java/org/apache/bookkeeper/stream/storage/impl/cluster/ZkClusterMetadataStore.java
+++
b/stream/storage/impl/src/main/java/org/apache/bookkeeper/stream/storage/impl/cluster/ZkClusterMetadataStore.java
@@ -98,7 +98,7 @@ public class ZkClusterMetadataStore implements
ClusterMetadataStore {
ClusterMetadata metadata = ClusterMetadata.newBuilder()
.setNumStorageContainers(numStorageContainers)
.build();
- ClusterAssignmentData assigmentData =
ClusterAssignmentData.newBuilder()
+ ClusterAssignmentData assignmentData =
ClusterAssignmentData.newBuilder()
.build();
try {
// we are using dlog for the storage backend, so we need to
initialize the dlog namespace
@@ -110,7 +110,7 @@ public class ZkClusterMetadataStore implements
ClusterMetadataStore {
.forOperations(
client.transactionOp().create().forPath(zkRootPath),
client.transactionOp().create().forPath(zkClusterMetadataPath,
metadata.toByteArray()),
- client.transactionOp().create().forPath(zkClusterAssignmentPath, assigmentData.toByteArray()),
+ client.transactionOp().create().forPath(zkClusterAssignmentPath, assignmentData.toByteArray()),
client.transactionOp().create().forPath(getServersPath(zkRootPath)),
client.transactionOp().create().forPath(getWritableServersPath(zkRootPath)),
client.transactionOp().create().forPath(getStoragePath(zkRootPath),
dlogMetadata.serialize()));
@@ -141,8 +141,8 @@ public class ZkClusterMetadataStore implements
ClusterMetadataStore {
}
@Override
- public void updateClusterAssignmentData(ClusterAssignmentData
assigmentData) {
- byte[] data = assigmentData.toByteArray();
+ public void updateClusterAssignmentData(ClusterAssignmentData
assignmentData) {
+ byte[] data = assignmentData.toByteArray();
try {
client.setData().forPath(zkClusterAssignmentPath, data);
} catch (Exception e) {
diff --git
a/tools/perf/src/main/java/org/apache/bookkeeper/tools/perf/table/PerfClient.java
b/tools/perf/src/main/java/org/apache/bookkeeper/tools/perf/table/PerfClient.java
index f8d7c2d02d..20b9422f21 100644
---
a/tools/perf/src/main/java/org/apache/bookkeeper/tools/perf/table/PerfClient.java
+++
b/tools/perf/src/main/java/org/apache/bookkeeper/tools/perf/table/PerfClient.java
@@ -144,7 +144,7 @@ public class PerfClient implements Runnable {
names = {
"-b", "--benchmarks"
},
- description = "List of benchamrks to run")
+ description = "List of benchmarks to run")
public List<String> benchmarks;
}